From 476d4af22cec8a9ebc90137712e5ab7070b7379d Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Fri, 3 Oct 2014 17:25:00 +0100 Subject: iio: inkern: add iio_read_channel_average_raw Add iio_read_channel_average_raw to support reading averaged raw values in consumer drivers. Signed-off-by: Sebastian Reichel Signed-off-by: Jonathan Cameron --- include/linux/iio/consumer.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 2752b1fd12be..651f9a0e2765 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -122,6 +122,19 @@ struct iio_channel int iio_read_channel_raw(struct iio_channel *chan, int *val); +/** + * iio_read_channel_average_raw() - read the averaged raw value from a given channel + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units are required. + * + * In contrast to the normal iio_read_channel_raw, this function + * returns the average of multiple reads. + */ +int iio_read_channel_average_raw(struct iio_channel *chan, int *val); + /** * iio_read_channel_processed() - read processed value from a given channel * @chan: The channel being queried. -- cgit v1.2.3
From 9fb6bf02e3ad04c20edb8e46536ce3eeda32c736 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Mon, 7 Apr 2014 13:39:33 -0400 Subject: HID: rmi: introduce RMI driver for Synaptics touchpads This driver adds support for RMI4 over USB or I2C. The current state is that it uses its own RMI4 implementation, but once RMI4 is merged upstream, the driver will be a transport driver for the RMI4 library. Part of this driver should be considered as temporary. Most of the RMI4 processing and input handling will be deleted at some point. I based my work on Andrew's port of RMI4 over HID (see https://github.com/mightybigcar/synaptics-rmi4/tree/rmihid ) This repo presents what the driver may look like in the end: https://github.com/mightybigcar/synaptics-rmi4/blob/rmihid/drivers/input/rmi4/rmi_hid.c Without this temporary solution, the workaround we gave to users is to disable i2c-hid, which leads to disabling the touchscreen on the XPS 11 and 12 (Haswell generation). Related bugs: https://bugzilla.redhat.com/show_bug.cgi?id=1048314 https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1218973 Signed-off-by: Andrew Duggan Signed-off-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/Kconfig | 8 + drivers/hid/Makefile | 1 + drivers/hid/hid-core.c | 2 + drivers/hid/hid-rmi.c | 889 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/hid.h | 2 + 5 files changed, 902 insertions(+) create mode 100644 drivers/hid/hid-rmi.c (limited to 'include/linux') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 7af9d0b5dea1..762f15d6ed88 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -657,6 +657,14 @@ config HID_SUNPLUS ---help--- Support for Sunplus wireless desktop. +config HID_RMI + tristate "Synaptics RMI4 device support" + depends on HID + ---help--- + Support for Synaptics RMI4 touchpads. + Say Y here if you have a Synaptics RMI4 touchpad over i2c-hid or usbhid + and want support for its special functionalities.
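As a usage illustration for the iio_read_channel_average_raw() interface added in the iio patch above, a consumer driver would normally pair the averaged raw value with the channel scale. The following is a minimal sketch, not code from either patch: the device pointer, the "vbat" channel name and the microvolt conversion are hypothetical, while iio_channel_get(), iio_read_channel_average_raw(), iio_read_channel_scale() and iio_channel_release() are the actual consumer calls.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

static int read_avg_battery_uv(struct device *dev, int *uv)
{
	struct iio_channel *chan;
	int raw, scale_int, scale_frac;
	int ret;

	chan = iio_channel_get(dev, "vbat");	/* "vbat" is illustrative */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* averaged ADC counts, using the helper added above */
	ret = iio_read_channel_average_raw(chan, &raw);
	if (ret < 0)
		goto out;

	/* raw reads are in ADC counts, so the scale still has to be applied */
	ret = iio_read_channel_scale(chan, &scale_int, &scale_frac);
	if (ret < 0)
		goto out;

	*uv = raw * scale_int;	/* simplified: fractional scale part ignored */
	ret = 0;
out:
	iio_channel_release(chan);
	return ret;
}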
+ config HID_GREENASIA tristate "GreenAsia (Product ID 0x12) game controller support" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index fc712dde02a4..a6fa6baf368e 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -97,6 +97,7 @@ obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \ hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o +obj-$(CONFIG_HID_RMI) += hid-rmi.o obj-$(CONFIG_HID_SAITEK) += hid-saitek.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9e8064205bc7..f05255d92de7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1882,6 +1882,8 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, + { HID_I2C_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c new file mode 100644 index 000000000000..699d631c6920 --- /dev/null +++ b/drivers/hid/hid-rmi.c @@ -0,0 +1,889 @@ +/* + * Copyright (c) 2013 Andrew Duggan + * Copyright (c) 2013 Synaptics Incorporated + * Copyright (c) 2014 Benjamin Tissoires + * Copyright (c) 2014 Red Hat, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hid-ids.h" + +#define RMI_MOUSE_REPORT_ID 0x01 /* Mouse emulation Report */ +#define RMI_WRITE_REPORT_ID 0x09 /* Output Report */ +#define RMI_READ_ADDR_REPORT_ID 0x0a /* Output Report */ +#define RMI_READ_DATA_REPORT_ID 0x0b /* Input Report */ +#define RMI_ATTN_REPORT_ID 0x0c /* Input Report */ +#define RMI_SET_RMI_MODE_REPORT_ID 0x0f /* Feature Report */ + +/* flags */ +#define RMI_READ_REQUEST_PENDING BIT(0) +#define RMI_READ_DATA_PENDING BIT(1) +#define RMI_STARTED BIT(2) + +enum rmi_mode_type { + RMI_MODE_OFF = 0, + RMI_MODE_ATTN_REPORTS = 1, + RMI_MODE_NO_PACKED_ATTN_REPORTS = 2, +}; + +struct rmi_function { + unsigned page; /* page of the function */ + u16 query_base_addr; /* base address for queries */ + u16 command_base_addr; /* base address for commands */ + u16 control_base_addr; /* base address for controls */ + u16 data_base_addr; /* base address for datas */ + unsigned int interrupt_base; /* cross-function interrupt number + * (uniq in the device)*/ + unsigned int interrupt_count; /* number of interrupts */ + unsigned int report_size; /* size of a report */ + unsigned long irq_mask; /* mask of the interrupts + * (to be applied against ATTN IRQ) */ +}; + +/** + * struct rmi_data - stores information for hid communication + * + * @page_mutex: Locks current page to avoid changing pages in unexpected ways. 
+ * @page: Keeps track of the current virtual page + * + * @wait: Used for waiting for read data + * + * @writeReport: output buffer when writing RMI registers + * @readReport: input buffer when reading RMI registers + * + * @input_report_size: size of an input report (advertised by HID) + * @output_report_size: size of an output report (advertised by HID) + * + * @flags: flags for the current device (started, reading, etc...) + * + * @f11: placeholder of internal RMI function F11 description + * @f30: placeholder of internal RMI function F30 description + * + * @max_fingers: maximum finger count reported by the device + * @max_x: maximum x value reported by the device + * @max_y: maximum y value reported by the device + * + * @gpio_led_count: count of GPIOs + LEDs reported by F30 + * @button_count: actual physical buttons count + * @button_mask: button mask used to decode GPIO ATTN reports + * @button_state_mask: pull state of the buttons + * + * @input: pointer to the kernel input device + * + * @reset_work: worker which will be called in case of a mouse report + * @hdev: pointer to the struct hid_device + */ +struct rmi_data { + struct mutex page_mutex; + int page; + + wait_queue_head_t wait; + + u8 *writeReport; + u8 *readReport; + + int input_report_size; + int output_report_size; + + unsigned long flags; + + struct rmi_function f11; + struct rmi_function f30; + + unsigned int max_fingers; + unsigned int max_x; + unsigned int max_y; + unsigned int x_size_mm; + unsigned int y_size_mm; + + unsigned int gpio_led_count; + unsigned int button_count; + unsigned long button_mask; + unsigned long button_state_mask; + + struct input_dev *input; + + struct work_struct reset_work; + struct hid_device *hdev; +}; + +#define RMI_PAGE(addr) (((addr) >> 8) & 0xff) + +static int rmi_write_report(struct hid_device *hdev, u8 *report, int len); + +/** + * rmi_set_page - Set RMI page + * @hdev: The pointer to the hid_device struct + * @page: The new page address. + * + * RMI devices have 16-bit addressing, but some of the physical + * implementations (like SMBus) only have 8-bit addressing. So RMI implements + * a page address at 0xff of every page so we can reliable page addresses + * every 256 registers. + * + * The page_mutex lock must be held when this function is entered. + * + * Returns zero on success, non-zero on failure. 
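+ * + * For example, a register at RMI address 0x0123 lives on page 0x01 (RMI_PAGE(0x0123) == 0x01), so rmi_read_block() below switches to page 0x01 through this function before issuing the access whenever the cached page differs.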
+ */ +static int rmi_set_page(struct hid_device *hdev, u8 page) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + int retval; + + data->writeReport[0] = RMI_WRITE_REPORT_ID; + data->writeReport[1] = 1; + data->writeReport[2] = 0xFF; + data->writeReport[4] = page; + + retval = rmi_write_report(hdev, data->writeReport, + data->output_report_size); + if (retval != data->output_report_size) { + dev_err(&hdev->dev, + "%s: set page failed: %d.", __func__, retval); + return retval; + } + + data->page = page; + return 0; +} + +static int rmi_set_mode(struct hid_device *hdev, u8 mode) +{ + int ret; + u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; + + ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, + sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + if (ret < 0) { + dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, + ret); + return ret; + } + + return 0; +} + +static int rmi_write_report(struct hid_device *hdev, u8 *report, int len) +{ + int ret; + + ret = hid_hw_output_report(hdev, (void *)report, len); + if (ret < 0) { + dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret); + return ret; + } + + return ret; +} + +static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf, + const int len) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + int ret; + int bytes_read; + int bytes_needed; + int retries; + int read_input_count; + + mutex_lock(&data->page_mutex); + + if (RMI_PAGE(addr) != data->page) { + ret = rmi_set_page(hdev, RMI_PAGE(addr)); + if (ret < 0) + goto exit; + } + + for (retries = 5; retries > 0; retries--) { + data->writeReport[0] = RMI_READ_ADDR_REPORT_ID; + data->writeReport[1] = 0; /* old 1 byte read count */ + data->writeReport[2] = addr & 0xFF; + data->writeReport[3] = (addr >> 8) & 0xFF; + data->writeReport[4] = len & 0xFF; + data->writeReport[5] = (len >> 8) & 0xFF; + + set_bit(RMI_READ_REQUEST_PENDING, &data->flags); + + ret = rmi_write_report(hdev, data->writeReport, + data->output_report_size); + if (ret != data->output_report_size) { + clear_bit(RMI_READ_REQUEST_PENDING, &data->flags); + dev_err(&hdev->dev, + "failed to write request output report (%d)\n", + ret); + goto exit; + } + + bytes_read = 0; + bytes_needed = len; + while (bytes_read < len) { + if (!wait_event_timeout(data->wait, + test_bit(RMI_READ_DATA_PENDING, &data->flags), + msecs_to_jiffies(1000))) { + hid_warn(hdev, "%s: timeout elapsed\n", + __func__); + ret = -EAGAIN; + break; + } + + read_input_count = data->readReport[1]; + memcpy(buf + bytes_read, &data->readReport[2], + read_input_count < bytes_needed ? 
+ read_input_count : bytes_needed); + + bytes_read += read_input_count; + bytes_needed -= read_input_count; + clear_bit(RMI_READ_DATA_PENDING, &data->flags); + } + + if (ret >= 0) { + ret = 0; + break; + } + } + +exit: + clear_bit(RMI_READ_REQUEST_PENDING, &data->flags); + mutex_unlock(&data->page_mutex); + return ret; +} + +static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf) +{ + return rmi_read_block(hdev, addr, buf, 1); +} + +static void rmi_f11_process_touch(struct rmi_data *hdata, int slot, + u8 finger_state, u8 *touch_data) +{ + int x, y, wx, wy; + int wide, major, minor; + int z; + + input_mt_slot(hdata->input, slot); + input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER, + finger_state == 0x01); + if (finger_state == 0x01) { + x = (touch_data[0] << 4) | (touch_data[2] & 0x07); + y = (touch_data[1] << 4) | (touch_data[2] >> 4); + wx = touch_data[3] & 0x07; + wy = touch_data[3] >> 4; + wide = (wx > wy); + major = max(wx, wy); + minor = min(wx, wy); + z = touch_data[4]; + + /* y is inverted */ + y = hdata->max_y - y; + + input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x); + input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y); + input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide); + input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z); + input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); + input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); + } +} + +static void rmi_reset_work(struct work_struct *work) +{ + struct rmi_data *hdata = container_of(work, struct rmi_data, + reset_work); + + /* switch the device to RMI if we receive a generic mouse report */ + rmi_set_mode(hdata->hdev, RMI_MODE_ATTN_REPORTS); +} + +static inline int rmi_schedule_reset(struct hid_device *hdev) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + return schedule_work(&hdata->reset_work); +} + +static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data, + int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + int offset; + int i; + + if (size < hdata->f11.report_size) + return 0; + + if (!(irq & hdata->f11.irq_mask)) + return 0; + + offset = (hdata->max_fingers >> 2) + 1; + for (i = 0; i < hdata->max_fingers; i++) { + int fs_byte_position = i >> 2; + int fs_bit_position = (i & 0x3) << 1; + int finger_state = (data[fs_byte_position] >> fs_bit_position) & + 0x03; + + rmi_f11_process_touch(hdata, i, finger_state, + &data[offset + 5 * i]); + } + input_mt_sync_frame(hdata->input); + input_sync(hdata->input); + return hdata->f11.report_size; +} + +static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data, + int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + int i; + int button = 0; + bool value; + + if (!(irq & hdata->f30.irq_mask)) + return 0; + + for (i = 0; i < hdata->gpio_led_count; i++) { + if (test_bit(i, &hdata->button_mask)) { + value = (data[i / 8] >> (i & 0x07)) & BIT(0); + if (test_bit(i, &hdata->button_state_mask)) + value = !value; + input_event(hdata->input, EV_KEY, BTN_LEFT + button++, + value); + } + } + return hdata->f30.report_size; +} + +static int rmi_input_event(struct hid_device *hdev, u8 *data, int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + unsigned long irq_mask = 0; + unsigned index = 2; + + if (!(test_bit(RMI_STARTED, &hdata->flags))) + return 0; + + irq_mask |= hdata->f11.irq_mask; + irq_mask |= hdata->f30.irq_mask; + + if (data[1] & ~irq_mask) + hid_warn(hdev, "unknown intr source:%02lx %s:%d\n", + data[1] & ~irq_mask, __FILE__, __LINE__); + + if 
(hdata->f11.interrupt_base < hdata->f30.interrupt_base) { + index += rmi_f11_input_event(hdev, data[1], &data[index], + size - index); + index += rmi_f30_input_event(hdev, data[1], &data[index], + size - index); + } else { + index += rmi_f30_input_event(hdev, data[1], &data[index], + size - index); + index += rmi_f11_input_event(hdev, data[1], &data[index], + size - index); + } + + return 1; +} + +static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + + if (!test_bit(RMI_READ_REQUEST_PENDING, &hdata->flags)) { + hid_err(hdev, "no read request pending\n"); + return 0; + } + + memcpy(hdata->readReport, data, size < hdata->input_report_size ? + size : hdata->input_report_size); + set_bit(RMI_READ_DATA_PENDING, &hdata->flags); + wake_up(&hdata->wait); + + return 1; +} + +static int rmi_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + switch (data[0]) { + case RMI_READ_DATA_REPORT_ID: + return rmi_read_data_event(hdev, data, size); + case RMI_ATTN_REPORT_ID: + return rmi_input_event(hdev, data, size); + case RMI_MOUSE_REPORT_ID: + rmi_schedule_reset(hdev); + break; + } + + return 0; +} + +static int rmi_post_reset(struct hid_device *hdev) +{ + return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); +} + +static int rmi_post_resume(struct hid_device *hdev) +{ + return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); +} + +#define RMI4_MAX_PAGE 0xff +#define RMI4_PAGE_SIZE 0x0100 + +#define PDT_START_SCAN_LOCATION 0x00e9 +#define PDT_END_SCAN_LOCATION 0x0005 +#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff) + +struct pdt_entry { + u8 query_base_addr:8; + u8 command_base_addr:8; + u8 control_base_addr:8; + u8 data_base_addr:8; + u8 interrupt_source_count:3; + u8 bits3and4:2; + u8 function_version:2; + u8 bit7:1; + u8 function_number:8; +} __attribute__((__packed__)); + +static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count) +{ + return GENMASK(irq_count + irq_base - 1, irq_base); +} + +static void rmi_register_function(struct rmi_data *data, + struct pdt_entry *pdt_entry, int page, unsigned interrupt_count) +{ + struct rmi_function *f = NULL; + u16 page_base = page << 8; + + switch (pdt_entry->function_number) { + case 0x11: + f = &data->f11; + break; + case 0x30: + f = &data->f30; + break; + } + + if (f) { + f->page = page; + f->query_base_addr = page_base | pdt_entry->query_base_addr; + f->command_base_addr = page_base | pdt_entry->command_base_addr; + f->control_base_addr = page_base | pdt_entry->control_base_addr; + f->data_base_addr = page_base | pdt_entry->data_base_addr; + f->interrupt_base = interrupt_count; + f->interrupt_count = pdt_entry->interrupt_source_count; + f->irq_mask = rmi_gen_mask(f->interrupt_base, + f->interrupt_count); + } +} + +static int rmi_scan_pdt(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + struct pdt_entry entry; + int page; + bool page_has_function; + int i; + int retval; + int interrupt = 0; + u16 page_start, pdt_start , pdt_end; + + hid_info(hdev, "Scanning PDT...\n"); + + for (page = 0; (page <= RMI4_MAX_PAGE); page++) { + page_start = RMI4_PAGE_SIZE * page; + pdt_start = page_start + PDT_START_SCAN_LOCATION; + pdt_end = page_start + PDT_END_SCAN_LOCATION; + + page_has_function = false; + for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) { + retval = rmi_read_block(hdev, i, &entry, sizeof(entry)); + if (retval) { + hid_err(hdev, + "Read of PDT entry at %#06x failed.\n", + i); + goto 
error_exit; + } + + if (RMI4_END_OF_PDT(entry.function_number)) + break; + + page_has_function = true; + + hid_info(hdev, "Found F%02X on page %#04x\n", + entry.function_number, page); + + rmi_register_function(data, &entry, page, interrupt); + interrupt += entry.interrupt_source_count; + } + + if (!page_has_function) + break; + } + + hid_info(hdev, "%s: Done with PDT scan.\n", __func__); + retval = 0; + +error_exit: + return retval; +} + +static int rmi_populate_f11(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + u8 buf[20]; + int ret; + bool has_query12; + bool has_physical_props; + unsigned x_size, y_size; + + if (!data->f11.query_base_addr) { + hid_err(hdev, "No 2D sensor found, giving up.\n"); + return -ENODEV; + } + + /* query 0 contains some useful information */ + ret = rmi_read(hdev, data->f11.query_base_addr, buf); + if (ret) { + hid_err(hdev, "can not get query 0: %d.\n", ret); + return ret; + } + has_query12 = !!(buf[0] & BIT(5)); + + /* query 1 to get the max number of fingers */ + ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf); + if (ret) { + hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret); + return ret; + } + data->max_fingers = (buf[0] & 0x07) + 1; + if (data->max_fingers > 5) + data->max_fingers = 10; + + data->f11.report_size = data->max_fingers * 5 + + DIV_ROUND_UP(data->max_fingers, 4); + + if (!(buf[0] & BIT(4))) { + hid_err(hdev, "No absolute events, giving up.\n"); + return -ENODEV; + } + + /* + * query 12 to know if the physical properties are reported + * (query 12 is at offset 10 for HID devices) + */ + if (has_query12) { + ret = rmi_read(hdev, data->f11.query_base_addr + 10, buf); + if (ret) { + hid_err(hdev, "can not get query 12: %d.\n", ret); + return ret; + } + has_physical_props = !!(buf[0] & BIT(5)); + + if (has_physical_props) { + ret = rmi_read_block(hdev, + data->f11.query_base_addr + 11, buf, 4); + if (ret) { + hid_err(hdev, "can not read query 15-18: %d.\n", + ret); + return ret; + } + + x_size = buf[0] | (buf[1] << 8); + y_size = buf[2] | (buf[3] << 8); + + data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10); + data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10); + + hid_info(hdev, "%s: size in mm: %d x %d\n", + __func__, data->x_size_mm, data->y_size_mm); + } + } + + /* retrieve the ctrl registers */ + ret = rmi_read_block(hdev, data->f11.control_base_addr, buf, 20); + if (ret) { + hid_err(hdev, "can not read ctrl block of size 20: %d.\n", ret); + return ret; + } + + data->max_x = buf[6] | (buf[7] << 8); + data->max_y = buf[8] | (buf[9] << 8); + + return 0; +} + +static int rmi_populate_f30(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + u8 buf[20]; + int ret; + bool has_gpio, has_led; + unsigned bytes_per_ctrl; + u8 ctrl2_addr; + int ctrl2_3_length; + int i; + + /* function F30 is for physical buttons */ + if (!data->f30.query_base_addr) { + hid_err(hdev, "No GPIO/LEDs found, giving up.\n"); + return -ENODEV; + } + + ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2); + if (ret) { + hid_err(hdev, "can not get F30 query registers: %d.\n", ret); + return ret; + } + + has_gpio = !!(buf[0] & BIT(3)); + has_led = !!(buf[0] & BIT(2)); + data->gpio_led_count = buf[1] & 0x1f; + + /* retrieve ctrl 2 & 3 registers */ + bytes_per_ctrl = (data->gpio_led_count + 7) / 8; + /* Ctrl0 is present only if both has_gpio and has_led are set*/ + ctrl2_addr = (has_gpio && has_led) ? 
bytes_per_ctrl : 0; + /* Ctrl1 is always be present */ + ctrl2_addr += bytes_per_ctrl; + ctrl2_3_length = 2 * bytes_per_ctrl; + + data->f30.report_size = bytes_per_ctrl; + + ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr, + buf, ctrl2_3_length); + if (ret) { + hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n", + ctrl2_3_length, ret); + return ret; + } + + for (i = 0; i < data->gpio_led_count; i++) { + int byte_position = i >> 3; + int bit_position = i & 0x07; + u8 dir_byte = buf[byte_position]; + u8 data_byte = buf[byte_position + bytes_per_ctrl]; + bool dir = (dir_byte >> bit_position) & BIT(0); + bool dat = (data_byte >> bit_position) & BIT(0); + + if (dir == 0) { + /* input mode */ + if (dat) { + /* actual buttons have pull up resistor */ + data->button_count++; + set_bit(i, &data->button_mask); + set_bit(i, &data->button_state_mask); + } + } + + } + + return 0; +} + +static int rmi_populate(struct hid_device *hdev) +{ + int ret; + + ret = rmi_scan_pdt(hdev); + if (ret) { + hid_err(hdev, "PDT scan failed with code %d.\n", ret); + return ret; + } + + ret = rmi_populate_f11(hdev); + if (ret) { + hid_err(hdev, "Error while initializing F11 (%d).\n", ret); + return ret; + } + + ret = rmi_populate_f30(hdev); + if (ret) + hid_warn(hdev, "Error while initializing F30 (%d).\n", ret); + + return 0; +} + +static void rmi_input_configured(struct hid_device *hdev, struct hid_input *hi) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + struct input_dev *input = hi->input; + int ret; + int res_x, res_y, i; + + data->input = input; + + hid_dbg(hdev, "Opening low level driver\n"); + ret = hid_hw_open(hdev); + if (ret) + return; + + /* Allow incoming hid reports */ + hid_device_io_start(hdev); + + ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); + if (ret < 0) { + dev_err(&hdev->dev, "failed to set rmi mode\n"); + goto exit; + } + + ret = rmi_set_page(hdev, 0); + if (ret < 0) { + dev_err(&hdev->dev, "failed to set page select to 0.\n"); + goto exit; + } + + ret = rmi_populate(hdev); + if (ret) + goto exit; + + __set_bit(EV_ABS, input->evbit); + input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0); + + if (data->x_size_mm && data->x_size_mm) { + res_x = (data->max_x - 1) / data->x_size_mm; + res_y = (data->max_y - 1) / data->x_size_mm; + + input_abs_set_res(input, ABS_MT_POSITION_X, res_x); + input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); + } + + input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0); + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0); + + input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER); + + if (data->button_count) { + __set_bit(EV_KEY, input->evbit); + for (i = 0; i < data->button_count; i++) + __set_bit(BTN_LEFT + i, input->keybit); + + if (data->button_count == 1) + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + } + + set_bit(RMI_STARTED, &data->flags); + +exit: + hid_device_io_stop(hdev); + hid_hw_close(hdev); +} + +static int rmi_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + /* we want to make HID ignore the advertised HID collection */ + return -1; +} + +static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct rmi_data *data = NULL; + int ret; + size_t 
alloc_size; + + data = devm_kzalloc(&hdev->dev, sizeof(struct rmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + INIT_WORK(&data->reset_work, rmi_reset_work); + data->hdev = hdev; + + hid_set_drvdata(hdev, data); + + hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + return ret; + } + + data->input_report_size = (hdev->report_enum[HID_INPUT_REPORT] + .report_id_hash[RMI_ATTN_REPORT_ID]->size >> 3) + + 1 /* report id */; + data->output_report_size = (hdev->report_enum[HID_OUTPUT_REPORT] + .report_id_hash[RMI_WRITE_REPORT_ID]->size >> 3) + + 1 /* report id */; + + alloc_size = data->output_report_size + data->input_report_size; + + data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL); + if (!data->writeReport) { + ret = -ENOMEM; + return ret; + } + + data->readReport = data->writeReport + data->output_report_size; + + init_waitqueue_head(&data->wait); + + mutex_init(&data->page_mutex); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + return ret; + } + + if (!test_bit(RMI_STARTED, &data->flags)) { + hid_hw_stop(hdev); + return -EIO; + } + + hid_hw_stop(hdev); + return 0; +} + +static void rmi_remove(struct hid_device *hdev) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + + clear_bit(RMI_STARTED, &hdata->flags); + + hid_hw_stop(hdev); +} + +static const struct hid_device_id rmi_id[] = { + { HID_I2C_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, + { } +}; +MODULE_DEVICE_TABLE(hid, rmi_id); + +static struct hid_driver rmi_driver = { + .name = "hid-rmi", + .id_table = rmi_id, + .probe = rmi_probe, + .remove = rmi_remove, + .raw_event = rmi_raw_event, + .input_mapping = rmi_input_mapping, + .input_configured = rmi_input_configured, +#ifdef CONFIG_PM + .resume = rmi_post_resume, + .reset_resume = rmi_post_reset, +#endif +}; + +module_hid_driver(rmi_driver); + +MODULE_AUTHOR("Andrew Duggan "); +MODULE_DESCRIPTION("RMI HID driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/hid.h b/include/linux/hid.h index 720e3a10608c..54f855b2c902 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -570,6 +570,8 @@ struct hid_descriptor { .bus = BUS_USB, .vendor = (ven), .product = (prod) #define HID_BLUETOOTH_DEVICE(ven, prod) \ .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) +#define HID_I2C_DEVICE(ven, prod) \ + .bus = BUS_I2C, .vendor = (ven), .product = (prod) #define HID_REPORT_ID(rep) \ .report_type = (rep) -- cgit v1.2.3 From 59c3d45e487315e6e05a3f2310b61109f8e503e7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Apr 2014 09:15:35 -0600 Subject: block: remove 'q' parameter from kblockd_schedule_*_work() The queue parameter is never used, just get rid of it. 
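A minimal usage sketch of the simplified interface (the work item and handler names are hypothetical; only the new kblockd_schedule_work() prototype comes from this patch):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

static struct work_struct requeue_work;		/* hypothetical work item */

static void requeue_fn(struct work_struct *work)
{
	/* deferred work now runs in kblockd workqueue context */
}

static void defer_requeue(void)
{
	INIT_WORK(&requeue_work, requeue_fn);
	/* no request_queue argument any more; the work item alone is enough */
	kblockd_schedule_work(&requeue_work);
}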
Signed-off-by: Jens Axboe --- block/blk-core.c | 6 +++--- block/blk-flush.c | 2 +- block/blk-mq.c | 7 ++----- block/cfq-iosched.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 4 ++-- 6 files changed, 10 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 34d7c196338b..f7d2c3335dfa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2904,14 +2904,14 @@ free_and_out: } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) +int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *dwork, unsigned long delay) +int kblockd_schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) { return queue_delayed_work(kblockd_workqueue, dwork, delay); } diff --git a/block/blk-flush.c b/block/blk-flush.c index 43e6b4755e9a..77f20458910c 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -144,7 +144,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { INIT_WORK(&rq->mq_flush_work, mq_flush_run); - kblockd_schedule_work(rq->q, &rq->mq_flush_work); + kblockd_schedule_work(&rq->mq_flush_work); return false; } else { if (add_front) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d2a9bdbee57..9c8f1f4ada7f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -608,11 +608,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (!async) __blk_mq_run_hw_queue(hctx); - else { - struct request_queue *q = hctx->queue; - - kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0); - } + else + kblockd_schedule_delayed_work(&hctx->delayed_work, 0); } void blk_mq_run_queues(struct request_queue *q, bool async) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e0985f1955e7..5063a0bd831a 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -908,7 +908,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); - kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); + kblockd_schedule_work(&cfqd->unplug_work); } } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5681c05ac506..91f99f4ce2e8 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -139,7 +139,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) */ spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(q, &device->requeue_work); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1e1fa3f93d5f..2425945d36ab 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1359,8 +1359,8 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* -- cgit v1.2.3 From 8ab14595b6dffecea264dcca2d6d9eea7c59273a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Apr 2014 09:17:40 -0600 Subject: block: add 
kblockd_schedule_delayed_work_on() Same function as kblockd_schedule_delayed_work(), but allow the caller to pass in a CPU that the work should be executed on. This just directly extends and maps into the workqueue API, and will be used to make the blk-mq mappings more strict. Signed-off-by: Jens Axboe --- block/blk-core.c | 7 +++++++ include/linux/blkdev.h | 1 + 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index f7d2c3335dfa..7af4a4898dcb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2917,6 +2917,13 @@ int kblockd_schedule_delayed_work(struct delayed_work *dwork, } EXPORT_SYMBOL(kblockd_schedule_delayed_work); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); + #define PLUG_MAGIC 0x91827364 /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2425945d36ab..5a31307c5ded 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1361,6 +1361,7 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* -- cgit v1.2.3 From e4043dcf30811f5db15181168e2aac172514302a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 9 Apr 2014 10:18:23 -0600 Subject: blk-mq: ensure that hardware queues are always run on the mapped CPUs Instead of providing soft mappings with no guarantees on hardware queues always being run on the right CPU, switch to a hard mapping guarantee that ensure that we always run the hardware queue on (one of, if more) the mapped CPU. Signed-off-by: Jens Axboe --- block/blk-mq.c | 66 ++++++++++++++++++++++++++++++++++++++------------ include/linux/blk-mq.h | 1 + 2 files changed, 52 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 9c8f1f4ada7f..5455ed19de1c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -209,11 +209,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_put_ctx(ctx); - if (!(gfp & __GFP_WAIT)) + if (gfp & __GFP_WAIT) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + } else { + blk_mq_put_ctx(ctx); break; + } - __blk_mq_run_hw_queue(hctx); blk_mq_wait_for_tags(hctx->tags); } while (1); @@ -514,6 +517,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) LIST_HEAD(rq_list); int bit, queued; + WARN_ON(!preempt_count()); + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; @@ -606,10 +611,22 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; - if (!async) + if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) __blk_mq_run_hw_queue(hctx); - else + else if (hctx->queue->nr_hw_queues == 1) kblockd_schedule_delayed_work(&hctx->delayed_work, 0); + else { + unsigned int cpu; + + /* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement + * than the first CPU. Or we could round-robin here. For now, + * just queue on the first CPU. 
+ */ + cpu = cpumask_first(hctx->cpumask); + kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0); + } } void blk_mq_run_queues(struct request_queue *q, bool async) @@ -623,7 +640,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async) test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; + preempt_disable(); blk_mq_run_hw_queue(hctx, async); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_run_queues); @@ -648,7 +667,10 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + + preempt_disable(); __blk_mq_run_hw_queue(hctx); + preempt_enable(); } EXPORT_SYMBOL(blk_mq_start_hw_queue); @@ -662,7 +684,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q) continue; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + preempt_disable(); blk_mq_run_hw_queue(hctx, true); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); @@ -672,7 +696,10 @@ static void blk_mq_work_fn(struct work_struct *work) struct blk_mq_hw_ctx *hctx; hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + + preempt_disable(); __blk_mq_run_hw_queue(hctx); + preempt_enable(); } static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, @@ -716,10 +743,10 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, spin_unlock(&ctx->lock); } - blk_mq_put_ctx(current_ctx); - if (run_queue) blk_mq_run_hw_queue(hctx, async); + + blk_mq_put_ctx(current_ctx); } static void blk_mq_insert_requests(struct request_queue *q, @@ -755,9 +782,8 @@ static void blk_mq_insert_requests(struct request_queue *q, } spin_unlock(&ctx->lock); - blk_mq_put_ctx(current_ctx); - blk_mq_run_hw_queue(hctx, from_schedule); + blk_mq_put_ctx(current_ctx); } static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -876,7 +902,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); - blk_mq_put_ctx(ctx); blk_insert_flush(rq); goto run_queue; } @@ -914,7 +939,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) } spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); /* * For a SYNC request, send it to the hardware immediately. For an @@ -923,6 +947,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) */ run_queue: blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua); + blk_mq_put_ctx(ctx); } /* @@ -990,9 +1015,9 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); blk_mq_run_hw_queue(hctx, true); + blk_mq_put_ctx(ctx); } static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, @@ -1255,12 +1280,13 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, __ctx->queue = q; /* If the cpu isn't online, the cpu is mapped to first hctx */ - hctx = q->mq_ops->map_queue(q, i); - hctx->nr_ctx++; - if (!cpu_online(i)) continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); + hctx->nr_ctx++; + /* * Set local node, IFF we have more than one hw queue. 
If * not, we remain on the home node of the device @@ -1277,6 +1303,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) struct blk_mq_ctx *ctx; queue_for_each_hw_ctx(q, hctx, i) { + cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; } @@ -1285,7 +1312,11 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ + if (!cpu_online(i)) + continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } @@ -1329,6 +1360,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, if (!hctxs[i]) goto err_hctxs; + if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) + goto err_hctxs; + hctxs[i]->numa_node = NUMA_NO_NODE; hctxs[i]->queue_num = i; } @@ -1392,6 +1426,7 @@ err_hctxs: for (i = 0; i < reg->nr_hw_queues; i++) { if (!hctxs[i]) break; + free_cpumask_var(hctxs[i]->cpumask); reg->ops->free_hctx(hctxs[i], i); } kfree(hctxs); @@ -1413,6 +1448,7 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); + free_cpumask_var(hctx->cpumask); q->mq_ops->free_hctx(hctx, i); } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0120451545d8..b6ee48740458 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -19,6 +19,7 @@ struct blk_mq_hw_ctx { unsigned long state; /* BLK_MQ_S_* flags */ struct delayed_work delayed_work; + cpumask_var_t cpumask; unsigned long flags; /* BLK_MQ_F_* flags */ -- cgit v1.2.3 From e3ec0a8c6eea4f22d5468f01c065caca4dd1ecca Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 13 Apr 2014 14:09:15 +0200 Subject: reset: Add of_reset_control_get to reset.h of_reset_control_get is not declared static in drivers/reset/core.c, which is correct as we want to use it elsewhere too. But it does not have a protype declared anywhere under include/linux. Add a prototype / stub for it to linux/reset.h to fix this. Reviewed-by: Josh Triplett Signed-off-by: Hans de Goede Signed-off-by: Philipp Zabel --- include/linux/reset.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/reset.h b/include/linux/reset.h index c0eda5023d74..349f150ae12c 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,6 +2,7 @@ #define _LINUX_RESET_H_ struct device; +struct device_node; struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -33,6 +34,9 @@ static inline struct reset_control *devm_reset_control_get_optional( return devm_reset_control_get(dev, id); } +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id); + #else static inline int reset_control_reset(struct reset_control *rstc) @@ -75,6 +79,12 @@ static inline struct reset_control *devm_reset_control_get_optional( return ERR_PTR(-ENOSYS); } +static inline struct reset_control *of_reset_control_get( + struct device_node *node, const char *id) +{ + return ERR_PTR(-ENOSYS); +} + #endif /* CONFIG_RESET_CONTROLLER */ #endif -- cgit v1.2.3 From 766e3721990d2c78e0d614b57753f105adbaa8c5 Mon Sep 17 00:00:00 2001 From: Scott Jiang Date: Fri, 4 Apr 2014 16:27:17 +0800 Subject: spi: convert spi-bfin-v3.c to a multiplatform driver Spi v3 controller is not only used on Blackfin. So rename it and use ioread/iowrite api to make it work on other platform. 
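The portability gain shows up in the MMIO accessors. A hedged before/after sketch follows: the Blackfin-only bfin_read()/bfin_write() lines reflect the style being replaced and are an assumption rather than a quote from this patch, while the ioread32()/iowrite32() form mirrors adi_spi_enable() in the new spi-adi-v3.c further down (drv_data is the driver's adi_spi_master, not redefined here).

#include <linux/io.h>

static void adi_spi_enable_sketch(struct adi_spi_master *drv_data)
{
	u32 ctl;

	/* old, Blackfin-only style (assumed): ctl = bfin_read(&drv_data->regs->control); */
	/* old, Blackfin-only style (assumed): bfin_write(&drv_data->regs->control, ctl | SPI_CTL_EN); */
	ctl = ioread32(&drv_data->regs->control);	/* generic accessor, portable */
	ctl |= SPI_CTL_EN;
	iowrite32(ctl, &drv_data->regs->control);
}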
Signed-off-by: Scott Jiang Signed-off-by: Mark Brown --- arch/blackfin/include/asm/bfin_spi3.h | 258 --------- arch/blackfin/mach-bf609/boards/ezkit.c | 22 +- drivers/spi/Kconfig | 4 +- drivers/spi/Makefile | 2 +- drivers/spi/spi-adi-v3.c | 985 ++++++++++++++++++++++++++++++++ drivers/spi/spi-bfin-v3.c | 965 ------------------------------- include/linux/spi/adi_spi3.h | 254 ++++++++ 7 files changed, 1253 insertions(+), 1237 deletions(-) delete mode 100644 arch/blackfin/include/asm/bfin_spi3.h create mode 100644 drivers/spi/spi-adi-v3.c delete mode 100644 drivers/spi/spi-bfin-v3.c create mode 100644 include/linux/spi/adi_spi3.h (limited to 'include/linux') diff --git a/arch/blackfin/include/asm/bfin_spi3.h b/arch/blackfin/include/asm/bfin_spi3.h deleted file mode 100644 index 0957e65a54be..000000000000 --- a/arch/blackfin/include/asm/bfin_spi3.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Analog Devices SPI3 controller driver - * - * Copyright (c) 2011 Analog Devices Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef _SPI_CHANNEL_H_ -#define _SPI_CHANNEL_H_ - -#include - -/* SPI_CONTROL */ -#define SPI_CTL_EN 0x00000001 /* Enable */ -#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */ -#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */ -#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */ -#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */ -#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */ -#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */ -#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */ -#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */ -#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */ -#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */ -#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */ -#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */ -#define SPI_CTL_LSBF 0x00001000 /* LSB First */ -#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */ -#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */ -#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */ -#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */ -#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */ -#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */ -#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */ -#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */ -#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */ -#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */ -#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */ -#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */ -#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */ -/* SPI_RX_CONTROL */ -#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */ -#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer 
Initiate */ -#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */ -#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */ -#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */ -#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */ -#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */ -#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */ -#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */ -#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */ -#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */ -#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */ -#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */ -#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */ -#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */ -#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */ -#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */ -#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */ -#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */ -#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */ -#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */ -#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */ -/* SPI_TX_CONTROL */ -#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */ -#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */ -#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */ -#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */ -#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */ -#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */ -#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */ -#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */ -#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */ -#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */ -#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */ -#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */ -#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */ -#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */ -#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */ -#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */ -#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */ -#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */ -#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */ -#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */ -#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */ -#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */ -/* SPI_CLOCK */ -#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */ -/* SPI_DELAY */ -#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */ -#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */ -#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */ -/* SPI_SSEL */ -#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */ -#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */ -#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */ -#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */ -#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */ -#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */ -#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */ -#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */ -#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */ -#define 
SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */ -#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */ -#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */ -#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */ -#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */ -/* SPI_RWC */ -#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */ -/* SPI_RWCR */ -#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */ -/* SPI_TWC */ -#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */ -/* SPI_TWCR */ -#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */ -/* SPI_IMASK */ -#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ -#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ -#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ -#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ -#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */ -#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */ -#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */ -#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ -/* SPI_IMASKCL */ -#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ -#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ -#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ -#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ -#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */ -#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */ -#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */ -#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ -/* SPI_IMASKST */ -#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ -#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ -#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ -#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ -#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ -#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */ -#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */ -#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */ -#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ -/* SPI_STATUS */ -#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */ -#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */ -#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */ -#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */ -#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */ -#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */ -#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */ -#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */ 
-#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */ -#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */ -#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */ -#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */ -#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */ -#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */ -#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */ -#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */ -#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */ -#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */ -#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */ -#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */ -#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */ -#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */ -#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */ -#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */ -#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */ -#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */ -/* SPI_ILAT */ -#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ -#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ -#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */ -#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */ -#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */ -#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */ -#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */ -#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */ -#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */ -#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */ -/* SPI_ILATCL */ -#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ -#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ -#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */ -#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */ -#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */ -#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */ -#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */ -#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */ -#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */ -#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */ - -/* - * bfin spi3 registers layout - */ -struct bfin_spi_regs { - u32 revid; - u32 control; - u32 rx_control; - u32 tx_control; - u32 clock; - u32 delay; - u32 ssel; - u32 rwc; - u32 rwcr; - u32 twc; - u32 twcr; - u32 reserved0; - u32 emask; - u32 emaskcl; - u32 emaskst; - u32 reserved1; - u32 status; - u32 elat; - u32 elatcl; - u32 reserved2; - u32 rfifo; - u32 reserved3; - u32 tfifo; -}; - -#define MAX_CTRL_CS 8 /* cs in spi controller */ - -/* device.platform_data for SSP controller devices */ -struct bfin_spi3_master { - u16 num_chipselect; - u16 pin_req[7]; -}; - -/* spi_board_info.controller_data for SPI slave devices, - * copied to spi_device.platform_data ... 
mostly for dma tuning - */ -struct bfin_spi3_chip { - u32 control; - u16 cs_chg_udelay; /* Some devices require 16-bit delays */ - u32 tx_dummy_val; /* tx value for rx only transfer */ - bool enable_dma; -}; - -#endif /* _SPI_CHANNEL_H_ */ diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 943f7e95ec15..1ba4600de69f 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -767,13 +767,13 @@ static struct flash_platform_data bfin_spi_flash_data = { .type = "w25q32", }; -static struct bfin_spi3_chip spi_flash_chip_info = { +static struct adi_spi3_chip spi_flash_chip_info = { .enable_dma = true, /* use dma transfer with this chip*/ }; #endif #if IS_ENABLED(CONFIG_SPI_SPIDEV) -static struct bfin_spi3_chip spidev_chip_info = { +static struct adi_spi3_chip spidev_chip_info = { .enable_dma = true, }; #endif @@ -1736,7 +1736,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif }; -#if IS_ENABLED(CONFIG_SPI_BFIN_V3) +#if IS_ENABLED(CONFIG_SPI_ADI_V3) /* SPI (0) */ static struct resource bfin_spi0_resource[] = { { @@ -1777,13 +1777,13 @@ static struct resource bfin_spi1_resource[] = { }; /* SPI controller data */ -static struct bfin_spi3_master bf60x_spi_master_info0 = { +static struct adi_spi3_master bf60x_spi_master_info0 = { .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bf60x_spi_master0 = { - .name = "bfin-spi3", + .name = "adi-spi3", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, @@ -1792,13 +1792,13 @@ static struct platform_device bf60x_spi_master0 = { }, }; -static struct bfin_spi3_master bf60x_spi_master_info1 = { +static struct adi_spi3_master bf60x_spi_master_info1 = { .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, }; static struct platform_device bf60x_spi_master1 = { - .name = "bfin-spi3", + .name = "adi-spi3", .id = 1, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi1_resource), .resource = bfin_spi1_resource, @@ -1990,7 +1990,7 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_sdh_device, #endif -#if IS_ENABLED(CONFIG_SPI_BFIN_V3) +#if IS_ENABLED(CONFIG_SPI_ADI_V3) &bf60x_spi_master0, &bf60x_spi_master1, #endif @@ -2051,8 +2051,8 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.1", "pinctrl-adi2.0", NULL, "uart1"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-sdh.0", "pinctrl-adi2.0", NULL, "rsi0"), PIN_MAP_MUX_GROUP_DEFAULT("stmmaceth.0", "pinctrl-adi2.0", NULL, "eth0"), - PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.0", "pinctrl-adi2.0", NULL, "spi0"), - PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.1", "pinctrl-adi2.0", NULL, "spi1"), + PIN_MAP_MUX_GROUP_DEFAULT("adi-spi3.0", "pinctrl-adi2.0", NULL, "spi0"), + PIN_MAP_MUX_GROUP_DEFAULT("adi-spi3.1", "pinctrl-adi2.0", NULL, "spi1"), PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.0", "pinctrl-adi2.0", NULL, "twi0"), PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.1", "pinctrl-adi2.0", NULL, "twi1"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 60f2b41c7310..a52e0edb7146 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -91,8 +91,8 @@ config SPI_BFIN5XX help This is the SPI controller 
master driver for Blackfin 5xx processor. -config SPI_BFIN_V3 - tristate "SPI controller v3 for Blackfin" +config SPI_ADI_V3 + tristate "SPI controller v3 for ADI" depends on BF60x help This is the SPI controller v3 master driver diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index bd792669e563..71e65dfc0ea3 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o -obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o +obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o diff --git a/drivers/spi/spi-adi-v3.c b/drivers/spi/spi-adi-v3.c new file mode 100644 index 000000000000..0c2914cfcdb5 --- /dev/null +++ b/drivers/spi/spi-adi-v3.c @@ -0,0 +1,985 @@ +/* + * Analog Devices SPI3 controller driver + * + * Copyright (c) 2014 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +enum adi_spi_state { + START_STATE, + RUNNING_STATE, + DONE_STATE, + ERROR_STATE +}; + +struct adi_spi_master; + +struct adi_spi_transfer_ops { + void (*write) (struct adi_spi_master *); + void (*read) (struct adi_spi_master *); + void (*duplex) (struct adi_spi_master *); +}; + +/* runtime info for spi master */ +struct adi_spi_master { + /* SPI framework hookup */ + struct spi_master *master; + + /* Regs base of SPI controller */ + struct adi_spi_regs __iomem *regs; + + /* Pin request list */ + u16 *pin_req; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct adi_spi_device *cur_chip; + unsigned transfer_len; + + /* transfer buffer */ + void *tx; + void *tx_end; + void *rx; + void *rx_end; + + /* dma info */ + unsigned int tx_dma; + unsigned int rx_dma; + dma_addr_t tx_dma_addr; + dma_addr_t rx_dma_addr; + unsigned long dummy_buffer; /* used in unidirectional transfer */ + unsigned long tx_dma_size; + unsigned long rx_dma_size; + int tx_num; + int rx_num; + + /* store register value for suspend/resume */ + u32 control; + u32 ssel; + + unsigned long sclk; + enum adi_spi_state state; + + const struct adi_spi_transfer_ops *ops; +}; + +struct adi_spi_device { + u32 control; + u32 clock; + u32 ssel; + + u8 cs; + u16 cs_chg_udelay; /* Some devices require > 255usec delay */ + u32 cs_gpio; + u32 tx_dummy_val; /* tx value for rx only transfer */ + bool enable_dma; + const struct adi_spi_transfer_ops *ops; +}; + +static void adi_spi_enable(struct adi_spi_master *drv_data) +{ + u32 ctl; + + ctl = ioread32(&drv_data->regs->control); + ctl |= SPI_CTL_EN; + iowrite32(ctl, &drv_data->regs->control); +} + +static void adi_spi_disable(struct adi_spi_master *drv_data) +{ + u32 ctl; + + 
ctl = ioread32(&drv_data->regs->control); + ctl &= ~SPI_CTL_EN; + iowrite32(ctl, &drv_data->regs->control); +} + +/* Caculate the SPI_CLOCK register value based on input HZ */ +static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz) +{ + u32 spi_clock = sclk / speed_hz; + + if (spi_clock) + spi_clock--; + return spi_clock; +} + +static int adi_spi_flush(struct adi_spi_master *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + + /* wait for stop and clear stat */ + while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) + cpu_relax(); + + iowrite32(0xFFFFFFFF, &drv_data->regs->status); + + return limit; +} + +/* Chip select operation functions for cs_change flag */ +static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip) +{ + if (likely(chip->cs < MAX_CTRL_CS)) { + u32 reg; + reg = ioread32(&drv_data->regs->ssel); + reg &= ~chip->ssel; + iowrite32(reg, &drv_data->regs->ssel); + } else { + gpio_set_value(chip->cs_gpio, 0); + } +} + +static void adi_spi_cs_deactive(struct adi_spi_master *drv_data, + struct adi_spi_device *chip) +{ + if (likely(chip->cs < MAX_CTRL_CS)) { + u32 reg; + reg = ioread32(&drv_data->regs->ssel); + reg |= chip->ssel; + iowrite32(reg, &drv_data->regs->ssel); + } else { + gpio_set_value(chip->cs_gpio, 1); + } + + /* Move delay here for consistency */ + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); +} + +/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ +static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data, + struct adi_spi_device *chip) +{ + if (chip->cs < MAX_CTRL_CS) { + u32 reg; + reg = ioread32(&drv_data->regs->ssel); + reg |= chip->ssel >> 8; + iowrite32(reg, &drv_data->regs->ssel); + } +} + +static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data, + struct adi_spi_device *chip) +{ + if (chip->cs < MAX_CTRL_CS) { + u32 reg; + reg = ioread32(&drv_data->regs->ssel); + reg &= ~(chip->ssel >> 8); + iowrite32(reg, &drv_data->regs->ssel); + } +} + +/* stop controller and re-config current chip*/ +static void adi_spi_restore_state(struct adi_spi_master *drv_data) +{ + struct adi_spi_device *chip = drv_data->cur_chip; + + /* Clear status and disable clock */ + iowrite32(0xFFFFFFFF, &drv_data->regs->status); + iowrite32(0x0, &drv_data->regs->rx_control); + iowrite32(0x0, &drv_data->regs->tx_control); + adi_spi_disable(drv_data); + + /* Load the registers */ + iowrite32(chip->control, &drv_data->regs->control); + iowrite32(chip->clock, &drv_data->regs->clock); + + adi_spi_enable(drv_data); + drv_data->tx_num = drv_data->rx_num = 0; + /* we always choose tx transfer initiate */ + iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control); + iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control); + adi_spi_cs_active(drv_data, chip); +} + +/* discard invalid rx data and empty rfifo */ +static inline void dummy_read(struct adi_spi_master *drv_data) +{ + while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE)) + ioread32(&drv_data->regs->rfifo); +} + +static void adi_spi_u8_write(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->tx < drv_data->tx_end) { + iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo); + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + ioread32(&drv_data->regs->rfifo); + } +} + +static void adi_spi_u8_read(struct adi_spi_master *drv_data) +{ + u32 tx_val = drv_data->cur_chip->tx_dummy_val; + + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + 
iowrite32(tx_val, &drv_data->regs->tfifo); + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo); + } +} + +static void adi_spi_u8_duplex(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo); + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo); + } +} + +static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = { + .write = adi_spi_u8_write, + .read = adi_spi_u8_read, + .duplex = adi_spi_u8_duplex, +}; + +static void adi_spi_u16_write(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->tx < drv_data->tx_end) { + iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo); + drv_data->tx += 2; + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + ioread32(&drv_data->regs->rfifo); + } +} + +static void adi_spi_u16_read(struct adi_spi_master *drv_data) +{ + u32 tx_val = drv_data->cur_chip->tx_dummy_val; + + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + iowrite32(tx_val, &drv_data->regs->tfifo); + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo); + drv_data->rx += 2; + } +} + +static void adi_spi_u16_duplex(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo); + drv_data->tx += 2; + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo); + drv_data->rx += 2; + } +} + +static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = { + .write = adi_spi_u16_write, + .read = adi_spi_u16_read, + .duplex = adi_spi_u16_duplex, +}; + +static void adi_spi_u32_write(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->tx < drv_data->tx_end) { + iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo); + drv_data->tx += 4; + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + ioread32(&drv_data->regs->rfifo); + } +} + +static void adi_spi_u32_read(struct adi_spi_master *drv_data) +{ + u32 tx_val = drv_data->cur_chip->tx_dummy_val; + + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + iowrite32(tx_val, &drv_data->regs->tfifo); + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo); + drv_data->rx += 4; + } +} + +static void adi_spi_u32_duplex(struct adi_spi_master *drv_data) +{ + dummy_read(drv_data); + while (drv_data->rx < drv_data->rx_end) { + iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo); + drv_data->tx += 4; + while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE) + cpu_relax(); + *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo); + drv_data->rx += 4; + } +} + +static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = { + .write = adi_spi_u32_write, + .read = adi_spi_u32_read, + .duplex = adi_spi_u32_duplex, +}; + + +/* test if there is more transfer to be done */ +static void adi_spi_next_transfer(struct adi_spi_master *drv) +{ + struct spi_message *msg = drv->cur_msg; + struct spi_transfer *t = drv->cur_transfer; + + /* Move to next transfer */ + if (t->transfer_list.next != &msg->transfers) { + drv->cur_transfer = 
list_entry(t->transfer_list.next, + struct spi_transfer, transfer_list); + drv->state = RUNNING_STATE; + } else { + drv->state = DONE_STATE; + drv->cur_transfer = NULL; + } +} + +static void adi_spi_giveback(struct adi_spi_master *drv_data) +{ + struct adi_spi_device *chip = drv_data->cur_chip; + + adi_spi_cs_deactive(drv_data, chip); + spi_finalize_current_message(drv_data->master); +} + +static int adi_spi_setup_transfer(struct adi_spi_master *drv) +{ + struct spi_transfer *t = drv->cur_transfer; + u32 cr, cr_width; + + if (t->tx_buf) { + drv->tx = (void *)t->tx_buf; + drv->tx_end = drv->tx + t->len; + } else { + drv->tx = NULL; + } + + if (t->rx_buf) { + drv->rx = t->rx_buf; + drv->rx_end = drv->rx + t->len; + } else { + drv->rx = NULL; + } + + drv->transfer_len = t->len; + + /* bits per word setup */ + switch (t->bits_per_word) { + case 8: + cr_width = SPI_CTL_SIZE08; + drv->ops = &adi_spi_transfer_ops_u8; + break; + case 16: + cr_width = SPI_CTL_SIZE16; + drv->ops = &adi_spi_transfer_ops_u16; + break; + case 32: + cr_width = SPI_CTL_SIZE32; + drv->ops = &adi_spi_transfer_ops_u32; + break; + default: + return -EINVAL; + } + cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE; + cr |= cr_width; + iowrite32(cr, &drv->regs->control); + + /* speed setup */ + iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock); + return 0; +} + +static int adi_spi_dma_xfer(struct adi_spi_master *drv_data) +{ + struct spi_transfer *t = drv_data->cur_transfer; + struct spi_message *msg = drv_data->cur_msg; + struct adi_spi_device *chip = drv_data->cur_chip; + u32 dma_config; + unsigned long word_count, word_size; + void *tx_buf, *rx_buf; + + switch (t->bits_per_word) { + case 8: + dma_config = WDSIZE_8 | PSIZE_8; + word_count = drv_data->transfer_len; + word_size = 1; + break; + case 16: + dma_config = WDSIZE_16 | PSIZE_16; + word_count = drv_data->transfer_len / 2; + word_size = 2; + break; + default: + dma_config = WDSIZE_32 | PSIZE_32; + word_count = drv_data->transfer_len / 4; + word_size = 4; + break; + } + + if (!drv_data->rx) { + tx_buf = drv_data->tx; + rx_buf = &drv_data->dummy_buffer; + drv_data->tx_dma_size = drv_data->transfer_len; + drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer); + set_dma_x_modify(drv_data->tx_dma, word_size); + set_dma_x_modify(drv_data->rx_dma, 0); + } else if (!drv_data->tx) { + drv_data->dummy_buffer = chip->tx_dummy_val; + tx_buf = &drv_data->dummy_buffer; + rx_buf = drv_data->rx; + drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer); + drv_data->rx_dma_size = drv_data->transfer_len; + set_dma_x_modify(drv_data->tx_dma, 0); + set_dma_x_modify(drv_data->rx_dma, word_size); + } else { + tx_buf = drv_data->tx; + rx_buf = drv_data->rx; + drv_data->tx_dma_size = drv_data->rx_dma_size + = drv_data->transfer_len; + set_dma_x_modify(drv_data->tx_dma, word_size); + set_dma_x_modify(drv_data->rx_dma, word_size); + } + + drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev, + (void *)tx_buf, + drv_data->tx_dma_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&msg->spi->dev, + drv_data->tx_dma_addr)) + return -ENOMEM; + + drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev, + (void *)rx_buf, + drv_data->rx_dma_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&msg->spi->dev, + drv_data->rx_dma_addr)) { + dma_unmap_single(&msg->spi->dev, + drv_data->tx_dma_addr, + drv_data->tx_dma_size, + DMA_TO_DEVICE); + return -ENOMEM; + } + + dummy_read(drv_data); + set_dma_x_count(drv_data->tx_dma, word_count); + set_dma_x_count(drv_data->rx_dma, word_count); + 
set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr); + set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr); + dma_config |= DMAFLOW_STOP | RESTART | DI_EN; + set_dma_config(drv_data->tx_dma, dma_config); + set_dma_config(drv_data->rx_dma, dma_config | WNR); + enable_dma(drv_data->tx_dma); + enable_dma(drv_data->rx_dma); + + iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE, + &drv_data->regs->rx_control); + iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF, + &drv_data->regs->tx_control); + + return 0; +} + +static int adi_spi_pio_xfer(struct adi_spi_master *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + + if (!drv_data->rx) { + /* write only half duplex */ + drv_data->ops->write(drv_data); + if (drv_data->tx != drv_data->tx_end) + return -EIO; + } else if (!drv_data->tx) { + /* read only half duplex */ + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + return -EIO; + } else { + /* full duplex mode */ + drv_data->ops->duplex(drv_data); + if (drv_data->tx != drv_data->tx_end) + return -EIO; + } + + if (!adi_spi_flush(drv_data)) + return -EIO; + msg->actual_length += drv_data->transfer_len; + tasklet_schedule(&drv_data->pump_transfers); + return 0; +} + +static void adi_spi_pump_transfers(unsigned long data) +{ + struct adi_spi_master *drv_data = (struct adi_spi_master *)data; + struct spi_message *msg = NULL; + struct spi_transfer *t = NULL; + struct adi_spi_device *chip = NULL; + int ret; + + /* Get current state information */ + msg = drv_data->cur_msg; + t = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + /* Handle for abort */ + if (drv_data->state == ERROR_STATE) { + msg->status = -EIO; + adi_spi_giveback(drv_data); + return; + } + + if (drv_data->state == RUNNING_STATE) { + if (t->delay_usecs) + udelay(t->delay_usecs); + if (t->cs_change) + adi_spi_cs_deactive(drv_data, chip); + adi_spi_next_transfer(drv_data); + t = drv_data->cur_transfer; + } + /* Handle end of message */ + if (drv_data->state == DONE_STATE) { + msg->status = 0; + adi_spi_giveback(drv_data); + return; + } + + if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) { + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return; + } + + ret = adi_spi_setup_transfer(drv_data); + if (ret) { + msg->status = ret; + adi_spi_giveback(drv_data); + } + + iowrite32(0xFFFFFFFF, &drv_data->regs->status); + adi_spi_cs_active(drv_data, chip); + drv_data->state = RUNNING_STATE; + + if (chip->enable_dma) + ret = adi_spi_dma_xfer(drv_data); + else + ret = adi_spi_pio_xfer(drv_data); + if (ret) { + msg->status = ret; + adi_spi_giveback(drv_data); + } +} + +static int adi_spi_transfer_one_message(struct spi_master *master, + struct spi_message *m) +{ + struct adi_spi_master *drv_data = spi_master_get_devdata(master); + + drv_data->cur_msg = m; + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + adi_spi_restore_state(drv_data); + + drv_data->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, transfer_list); + + tasklet_schedule(&drv_data->pump_transfers); + return 0; +} + +#define MAX_SPI_SSEL 7 + +static const u16 ssel[][MAX_SPI_SSEL] = { + {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, + P_SPI0_SSEL4, P_SPI0_SSEL5, + P_SPI0_SSEL6, P_SPI0_SSEL7}, + + {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, + P_SPI1_SSEL4, P_SPI1_SSEL5, + P_SPI1_SSEL6, P_SPI1_SSEL7}, + + {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, + P_SPI2_SSEL4, P_SPI2_SSEL5, + P_SPI2_SSEL6, 
P_SPI2_SSEL7}, +}; + +static int adi_spi_setup(struct spi_device *spi) +{ + struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master); + struct adi_spi_device *chip = spi_get_ctldata(spi); + u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; + int ret = -EINVAL; + + if (!chip) { + struct adi_spi3_chip *chip_info = spi->controller_data; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, "can not allocate chip data\n"); + return -ENOMEM; + } + if (chip_info) { + if (chip_info->control & ~ctl_reg) { + dev_err(&spi->dev, + "do not set bits that the SPI framework manages\n"); + goto error; + } + chip->control = chip_info->control; + chip->cs_chg_udelay = chip_info->cs_chg_udelay; + chip->tx_dummy_val = chip_info->tx_dummy_val; + chip->enable_dma = chip_info->enable_dma; + } + chip->cs = spi->chip_select; + + if (chip->cs < MAX_CTRL_CS) { + chip->ssel = (1 << chip->cs) << 8; + ret = peripheral_request(ssel[spi->master->bus_num] + [chip->cs-1], dev_name(&spi->dev)); + if (ret) { + dev_err(&spi->dev, "peripheral_request() error\n"); + goto error; + } + } else { + chip->cs_gpio = chip->cs - MAX_CTRL_CS; + ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH, + dev_name(&spi->dev)); + if (ret) { + dev_err(&spi->dev, "gpio_request_one() error\n"); + goto error; + } + } + spi_set_ctldata(spi, chip); + } + + /* force a default base state */ + chip->control &= ctl_reg; + + if (spi->mode & SPI_CPOL) + chip->control |= SPI_CTL_CPOL; + if (spi->mode & SPI_CPHA) + chip->control |= SPI_CTL_CPHA; + if (spi->mode & SPI_LSB_FIRST) + chip->control |= SPI_CTL_LSBF; + chip->control |= SPI_CTL_MSTR; + /* we choose software to controll cs */ + chip->control &= ~SPI_CTL_ASSEL; + + chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); + + adi_spi_cs_enable(drv_data, chip); + adi_spi_cs_deactive(drv_data, chip); + + return 0; +error: + if (chip) { + kfree(chip); + spi_set_ctldata(spi, NULL); + } + + return ret; +} + +static void adi_spi_cleanup(struct spi_device *spi) +{ + struct adi_spi_device *chip = spi_get_ctldata(spi); + struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master); + + if (!chip) + return; + + if (chip->cs < MAX_CTRL_CS) { + peripheral_free(ssel[spi->master->bus_num] + [chip->cs-1]); + adi_spi_cs_disable(drv_data, chip); + } else { + gpio_free(chip->cs_gpio); + } + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id) +{ + struct adi_spi_master *drv_data = dev_id; + u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); + u32 tx_ctl; + + clear_dma_irqstat(drv_data->tx_dma); + if (dma_stat & DMA_DONE) { + drv_data->tx_num++; + } else { + dev_err(&drv_data->master->dev, + "spi tx dma error: %d\n", dma_stat); + if (drv_data->tx) + drv_data->state = ERROR_STATE; + } + tx_ctl = ioread32(&drv_data->regs->tx_control); + tx_ctl &= ~SPI_TXCTL_TDR_NF; + iowrite32(tx_ctl, &drv_data->regs->tx_control); + return IRQ_HANDLED; +} + +static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id) +{ + struct adi_spi_master *drv_data = dev_id; + struct spi_message *msg = drv_data->cur_msg; + u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); + + clear_dma_irqstat(drv_data->rx_dma); + if (dma_stat & DMA_DONE) { + drv_data->rx_num++; + /* we may fail on tx dma */ + if (drv_data->state != ERROR_STATE) + msg->actual_length += drv_data->transfer_len; + } else { + drv_data->state = ERROR_STATE; + dev_err(&drv_data->master->dev, + "spi rx dma error: %d\n", dma_stat); + } + iowrite32(0, 
&drv_data->regs->tx_control); + iowrite32(0, &drv_data->regs->rx_control); + if (drv_data->rx_num != drv_data->tx_num) + dev_dbg(&drv_data->master->dev, + "dma interrupt missing: tx=%d,rx=%d\n", + drv_data->tx_num, drv_data->rx_num); + tasklet_schedule(&drv_data->pump_transfers); + return IRQ_HANDLED; +} + +static int adi_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct adi_spi3_master *info = dev_get_platdata(dev); + struct spi_master *master; + struct adi_spi_master *drv_data; + struct resource *mem, *res; + unsigned int tx_dma, rx_dma; + unsigned long sclk; + int ret; + + if (!info) { + dev_err(dev, "platform data missing!\n"); + return -ENODEV; + } + + sclk = get_sclk1(); + if (!sclk) { + dev_err(dev, "can not get sclk1\n"); + return -ENXIO; + } + + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!res) { + dev_err(dev, "can not get tx dma resource\n"); + return -ENXIO; + } + tx_dma = res->start; + + res = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!res) { + dev_err(dev, "can not get rx dma resource\n"); + return -ENXIO; + } + rx_dma = res->start; + + /* allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(*drv_data)); + if (!master) { + dev_err(dev, "can not alloc spi_master\n"); + return -ENOMEM; + } + platform_set_drvdata(pdev, master); + + /* the mode bits supported by this driver */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + + master->bus_num = pdev->id; + master->num_chipselect = info->num_chipselect; + master->cleanup = adi_spi_cleanup; + master->setup = adi_spi_setup; + master->transfer_one_message = adi_spi_transfer_one_message; + master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(8); + + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->tx_dma = tx_dma; + drv_data->rx_dma = rx_dma; + drv_data->pin_req = info->pin_req; + drv_data->sclk = sclk; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + drv_data->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(drv_data->regs)) { + ret = PTR_ERR(drv_data->regs); + goto err_put_master; + } + + /* request tx and rx dma */ + ret = request_dma(tx_dma, "SPI_TX_DMA"); + if (ret) { + dev_err(dev, "can not request SPI TX DMA channel\n"); + goto err_put_master; + } + set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data); + + ret = request_dma(rx_dma, "SPI_RX_DMA"); + if (ret) { + dev_err(dev, "can not request SPI RX DMA channel\n"); + goto err_free_tx_dma; + } + set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data); + + /* request CLK, MOSI and MISO */ + ret = peripheral_request_list(drv_data->pin_req, "adi-spi3"); + if (ret < 0) { + dev_err(dev, "can not request spi pins\n"); + goto err_free_rx_dma; + } + + iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control); + iowrite32(0x0000FE00, &drv_data->regs->ssel); + iowrite32(0x0, &drv_data->regs->delay); + + tasklet_init(&drv_data->pump_transfers, + adi_spi_pump_transfers, (unsigned long)drv_data); + /* register with the SPI framework */ + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(dev, "can not register spi master\n"); + goto err_free_peripheral; + } + + return ret; + +err_free_peripheral: + peripheral_free_list(drv_data->pin_req); +err_free_rx_dma: + free_dma(rx_dma); +err_free_tx_dma: + free_dma(tx_dma); +err_put_master: + spi_master_put(master); + + return ret; +} + +static int adi_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = 
platform_get_drvdata(pdev); + struct adi_spi_master *drv_data = spi_master_get_devdata(master); + + adi_spi_disable(drv_data); + peripheral_free_list(drv_data->pin_req); + free_dma(drv_data->rx_dma); + free_dma(drv_data->tx_dma); + return 0; +} + +#ifdef CONFIG_PM +static int adi_spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct adi_spi_master *drv_data = spi_master_get_devdata(master); + + spi_master_suspend(master); + + drv_data->control = ioread32(&drv_data->regs->control); + drv_data->ssel = ioread32(&drv_data->regs->ssel); + + iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control); + iowrite32(0x0000FE00, &drv_data->regs->ssel); + dma_disable_irq(drv_data->rx_dma); + dma_disable_irq(drv_data->tx_dma); + + return 0; +} + +static int adi_spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct adi_spi_master *drv_data = spi_master_get_devdata(master); + int ret = 0; + + /* bootrom may modify spi and dma status when resume in spi boot mode */ + disable_dma(drv_data->rx_dma); + + dma_enable_irq(drv_data->rx_dma); + dma_enable_irq(drv_data->tx_dma); + iowrite32(drv_data->control, &drv_data->regs->control); + iowrite32(drv_data->ssel, &drv_data->regs->ssel); + + ret = spi_master_resume(master); + if (ret) { + free_dma(drv_data->rx_dma); + free_dma(drv_data->tx_dma); + } + + return ret; +} +#endif +static const struct dev_pm_ops adi_spi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume) +}; + +MODULE_ALIAS("platform:adi-spi3"); +static struct platform_driver adi_spi_driver = { + .driver = { + .name = "adi-spi3", + .owner = THIS_MODULE, + .pm = &adi_spi_pm_ops, + }, + .remove = adi_spi_remove, +}; + +module_platform_driver_probe(adi_spi_driver, adi_spi_probe); + +MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); +MODULE_AUTHOR("Scott Jiang "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c deleted file mode 100644 index 4089d0e0d84e..000000000000 --- a/drivers/spi/spi-bfin-v3.c +++ /dev/null @@ -1,965 +0,0 @@ -/* - * Analog Devices SPI3 controller driver - * - * Copyright (c) 2013 Analog Devices Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -enum bfin_spi_state { - START_STATE, - RUNNING_STATE, - DONE_STATE, - ERROR_STATE -}; - -struct bfin_spi_master; - -struct bfin_spi_transfer_ops { - void (*write) (struct bfin_spi_master *); - void (*read) (struct bfin_spi_master *); - void (*duplex) (struct bfin_spi_master *); -}; - -/* runtime info for spi master */ -struct bfin_spi_master { - /* SPI framework hookup */ - struct spi_master *master; - - /* Regs base of SPI controller */ - struct bfin_spi_regs __iomem *regs; - - /* Pin request list */ - u16 *pin_req; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct bfin_spi_device *cur_chip; - unsigned transfer_len; - - /* transfer buffer */ - void *tx; - void *tx_end; - void *rx; - void *rx_end; - - /* dma info */ - unsigned int tx_dma; - unsigned int rx_dma; - dma_addr_t tx_dma_addr; - dma_addr_t rx_dma_addr; - unsigned long dummy_buffer; /* used in unidirectional transfer */ - unsigned long tx_dma_size; - unsigned long rx_dma_size; - int tx_num; - int rx_num; - - /* store register value for suspend/resume */ - u32 control; - u32 ssel; - - unsigned long sclk; - enum bfin_spi_state state; - - const struct bfin_spi_transfer_ops *ops; -}; - -struct bfin_spi_device { - u32 control; - u32 clock; - u32 ssel; - - u8 cs; - u16 cs_chg_udelay; /* Some devices require > 255usec delay */ - u32 cs_gpio; - u32 tx_dummy_val; /* tx value for rx only transfer */ - bool enable_dma; - const struct bfin_spi_transfer_ops *ops; -}; - -static void bfin_spi_enable(struct bfin_spi_master *drv_data) -{ - bfin_write_or(&drv_data->regs->control, SPI_CTL_EN); -} - -static void bfin_spi_disable(struct bfin_spi_master *drv_data) -{ - bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN); -} - -/* Caculate the SPI_CLOCK register value based on input HZ */ -static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz) -{ - u32 spi_clock = sclk / speed_hz; - - if (spi_clock) - spi_clock--; - return spi_clock; -} - -static int bfin_spi_flush(struct bfin_spi_master *drv_data) -{ - unsigned long limit = loops_per_jiffy << 1; - - /* wait for stop and clear stat */ - while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) - cpu_relax(); - - bfin_write(&drv_data->regs->status, 0xFFFFFFFF); - - return limit; -} - -/* Chip select operation functions for cs_change flag */ -static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip) -{ - if (likely(chip->cs < MAX_CTRL_CS)) - bfin_write_and(&drv_data->regs->ssel, ~chip->ssel); - else - gpio_set_value(chip->cs_gpio, 0); -} - -static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data, - struct bfin_spi_device *chip) -{ - if (likely(chip->cs < MAX_CTRL_CS)) - bfin_write_or(&drv_data->regs->ssel, chip->ssel); - else - gpio_set_value(chip->cs_gpio, 1); - - /* Move delay here for consistency */ - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); -} - -/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ -static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data, - struct bfin_spi_device *chip) -{ - if (chip->cs < MAX_CTRL_CS) - bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8); -} - -static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data, - struct 
bfin_spi_device *chip) -{ - if (chip->cs < MAX_CTRL_CS) - bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8)); -} - -/* stop controller and re-config current chip*/ -static void bfin_spi_restore_state(struct bfin_spi_master *drv_data) -{ - struct bfin_spi_device *chip = drv_data->cur_chip; - - /* Clear status and disable clock */ - bfin_write(&drv_data->regs->status, 0xFFFFFFFF); - bfin_write(&drv_data->regs->rx_control, 0x0); - bfin_write(&drv_data->regs->tx_control, 0x0); - bfin_spi_disable(drv_data); - - SSYNC(); - - /* Load the registers */ - bfin_write(&drv_data->regs->control, chip->control); - bfin_write(&drv_data->regs->clock, chip->clock); - - bfin_spi_enable(drv_data); - drv_data->tx_num = drv_data->rx_num = 0; - /* we always choose tx transfer initiate */ - bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN); - bfin_write(&drv_data->regs->tx_control, - SPI_TXCTL_TEN | SPI_TXCTL_TTI); - bfin_spi_cs_active(drv_data, chip); -} - -/* discard invalid rx data and empty rfifo */ -static inline void dummy_read(struct bfin_spi_master *drv_data) -{ - while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)) - bfin_read(&drv_data->regs->rfifo); -} - -static void bfin_spi_u8_write(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->tx < drv_data->tx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - bfin_read(&drv_data->regs->rfifo); - } -} - -static void bfin_spi_u8_read(struct bfin_spi_master *drv_data) -{ - u32 tx_val = drv_data->cur_chip->tx_dummy_val; - - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, tx_val); - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); - } -} - -static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); - } -} - -static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { - .write = bfin_spi_u8_write, - .read = bfin_spi_u8_read, - .duplex = bfin_spi_u8_duplex, -}; - -static void bfin_spi_u16_write(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->tx < drv_data->tx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); - drv_data->tx += 2; - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - bfin_read(&drv_data->regs->rfifo); - } -} - -static void bfin_spi_u16_read(struct bfin_spi_master *drv_data) -{ - u32 tx_val = drv_data->cur_chip->tx_dummy_val; - - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, tx_val); - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); - drv_data->rx += 2; - } -} - -static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); - drv_data->tx += 2; - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); - drv_data->rx += 2; - } -} - -static const struct 
bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { - .write = bfin_spi_u16_write, - .read = bfin_spi_u16_read, - .duplex = bfin_spi_u16_duplex, -}; - -static void bfin_spi_u32_write(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->tx < drv_data->tx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); - drv_data->tx += 4; - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - bfin_read(&drv_data->regs->rfifo); - } -} - -static void bfin_spi_u32_read(struct bfin_spi_master *drv_data) -{ - u32 tx_val = drv_data->cur_chip->tx_dummy_val; - - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, tx_val); - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); - drv_data->rx += 4; - } -} - -static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data) -{ - dummy_read(drv_data); - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); - drv_data->tx += 4; - while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) - cpu_relax(); - *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); - drv_data->rx += 4; - } -} - -static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = { - .write = bfin_spi_u32_write, - .read = bfin_spi_u32_read, - .duplex = bfin_spi_u32_duplex, -}; - - -/* test if there is more transfer to be done */ -static void bfin_spi_next_transfer(struct bfin_spi_master *drv) -{ - struct spi_message *msg = drv->cur_msg; - struct spi_transfer *t = drv->cur_transfer; - - /* Move to next transfer */ - if (t->transfer_list.next != &msg->transfers) { - drv->cur_transfer = list_entry(t->transfer_list.next, - struct spi_transfer, transfer_list); - drv->state = RUNNING_STATE; - } else { - drv->state = DONE_STATE; - drv->cur_transfer = NULL; - } -} - -static void bfin_spi_giveback(struct bfin_spi_master *drv_data) -{ - struct bfin_spi_device *chip = drv_data->cur_chip; - - bfin_spi_cs_deactive(drv_data, chip); - spi_finalize_current_message(drv_data->master); -} - -static int bfin_spi_setup_transfer(struct bfin_spi_master *drv) -{ - struct spi_transfer *t = drv->cur_transfer; - u32 cr, cr_width; - - if (t->tx_buf) { - drv->tx = (void *)t->tx_buf; - drv->tx_end = drv->tx + t->len; - } else { - drv->tx = NULL; - } - - if (t->rx_buf) { - drv->rx = t->rx_buf; - drv->rx_end = drv->rx + t->len; - } else { - drv->rx = NULL; - } - - drv->transfer_len = t->len; - - /* bits per word setup */ - switch (t->bits_per_word) { - case 8: - cr_width = SPI_CTL_SIZE08; - drv->ops = &bfin_bfin_spi_transfer_ops_u8; - break; - case 16: - cr_width = SPI_CTL_SIZE16; - drv->ops = &bfin_bfin_spi_transfer_ops_u16; - break; - case 32: - cr_width = SPI_CTL_SIZE32; - drv->ops = &bfin_bfin_spi_transfer_ops_u32; - break; - default: - return -EINVAL; - } - cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE; - cr |= cr_width; - bfin_write(&drv->regs->control, cr); - - /* speed setup */ - bfin_write(&drv->regs->clock, - hz_to_spi_clock(drv->sclk, t->speed_hz)); - return 0; -} - -static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data) -{ - struct spi_transfer *t = drv_data->cur_transfer; - struct spi_message *msg = drv_data->cur_msg; - struct bfin_spi_device *chip = drv_data->cur_chip; - u32 dma_config; - unsigned long word_count, word_size; - void *tx_buf, *rx_buf; - - switch (t->bits_per_word) { - case 8: - dma_config = WDSIZE_8 | PSIZE_8; - word_count 
= drv_data->transfer_len; - word_size = 1; - break; - case 16: - dma_config = WDSIZE_16 | PSIZE_16; - word_count = drv_data->transfer_len / 2; - word_size = 2; - break; - default: - dma_config = WDSIZE_32 | PSIZE_32; - word_count = drv_data->transfer_len / 4; - word_size = 4; - break; - } - - if (!drv_data->rx) { - tx_buf = drv_data->tx; - rx_buf = &drv_data->dummy_buffer; - drv_data->tx_dma_size = drv_data->transfer_len; - drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer); - set_dma_x_modify(drv_data->tx_dma, word_size); - set_dma_x_modify(drv_data->rx_dma, 0); - } else if (!drv_data->tx) { - drv_data->dummy_buffer = chip->tx_dummy_val; - tx_buf = &drv_data->dummy_buffer; - rx_buf = drv_data->rx; - drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer); - drv_data->rx_dma_size = drv_data->transfer_len; - set_dma_x_modify(drv_data->tx_dma, 0); - set_dma_x_modify(drv_data->rx_dma, word_size); - } else { - tx_buf = drv_data->tx; - rx_buf = drv_data->rx; - drv_data->tx_dma_size = drv_data->rx_dma_size - = drv_data->transfer_len; - set_dma_x_modify(drv_data->tx_dma, word_size); - set_dma_x_modify(drv_data->rx_dma, word_size); - } - - drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev, - (void *)tx_buf, - drv_data->tx_dma_size, - DMA_TO_DEVICE); - if (dma_mapping_error(&msg->spi->dev, - drv_data->tx_dma_addr)) - return -ENOMEM; - - drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev, - (void *)rx_buf, - drv_data->rx_dma_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(&msg->spi->dev, - drv_data->rx_dma_addr)) { - dma_unmap_single(&msg->spi->dev, - drv_data->tx_dma_addr, - drv_data->tx_dma_size, - DMA_TO_DEVICE); - return -ENOMEM; - } - - dummy_read(drv_data); - set_dma_x_count(drv_data->tx_dma, word_count); - set_dma_x_count(drv_data->rx_dma, word_count); - set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr); - set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr); - dma_config |= DMAFLOW_STOP | RESTART | DI_EN; - set_dma_config(drv_data->tx_dma, dma_config); - set_dma_config(drv_data->rx_dma, dma_config | WNR); - enable_dma(drv_data->tx_dma); - enable_dma(drv_data->rx_dma); - SSYNC(); - - bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE); - SSYNC(); - bfin_write(&drv_data->regs->tx_control, - SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF); - - return 0; -} - -static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - - if (!drv_data->rx) { - /* write only half duplex */ - drv_data->ops->write(drv_data); - if (drv_data->tx != drv_data->tx_end) - return -EIO; - } else if (!drv_data->tx) { - /* read only half duplex */ - drv_data->ops->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - return -EIO; - } else { - /* full duplex mode */ - drv_data->ops->duplex(drv_data); - if (drv_data->tx != drv_data->tx_end) - return -EIO; - } - - if (!bfin_spi_flush(drv_data)) - return -EIO; - msg->actual_length += drv_data->transfer_len; - tasklet_schedule(&drv_data->pump_transfers); - return 0; -} - -static void bfin_spi_pump_transfers(unsigned long data) -{ - struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data; - struct spi_message *msg = NULL; - struct spi_transfer *t = NULL; - struct bfin_spi_device *chip = NULL; - int ret; - - /* Get current state information */ - msg = drv_data->cur_msg; - t = drv_data->cur_transfer; - chip = drv_data->cur_chip; - - /* Handle for abort */ - if (drv_data->state == ERROR_STATE) { - msg->status = -EIO; - bfin_spi_giveback(drv_data); - return; - } - 
- if (drv_data->state == RUNNING_STATE) { - if (t->delay_usecs) - udelay(t->delay_usecs); - if (t->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - bfin_spi_next_transfer(drv_data); - t = drv_data->cur_transfer; - } - /* Handle end of message */ - if (drv_data->state == DONE_STATE) { - msg->status = 0; - bfin_spi_giveback(drv_data); - return; - } - - if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) { - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); - return; - } - - ret = bfin_spi_setup_transfer(drv_data); - if (ret) { - msg->status = ret; - bfin_spi_giveback(drv_data); - } - - bfin_write(&drv_data->regs->status, 0xFFFFFFFF); - bfin_spi_cs_active(drv_data, chip); - drv_data->state = RUNNING_STATE; - - if (chip->enable_dma) - ret = bfin_spi_dma_xfer(drv_data); - else - ret = bfin_spi_pio_xfer(drv_data); - if (ret) { - msg->status = ret; - bfin_spi_giveback(drv_data); - } -} - -static int bfin_spi_transfer_one_message(struct spi_master *master, - struct spi_message *m) -{ - struct bfin_spi_master *drv_data = spi_master_get_devdata(master); - - drv_data->cur_msg = m; - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - bfin_spi_restore_state(drv_data); - - drv_data->state = START_STATE; - drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, - struct spi_transfer, transfer_list); - - tasklet_schedule(&drv_data->pump_transfers); - return 0; -} - -#define MAX_SPI_SSEL 7 - -static const u16 ssel[][MAX_SPI_SSEL] = { - {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, - P_SPI0_SSEL4, P_SPI0_SSEL5, - P_SPI0_SSEL6, P_SPI0_SSEL7}, - - {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, - P_SPI1_SSEL4, P_SPI1_SSEL5, - P_SPI1_SSEL6, P_SPI1_SSEL7}, - - {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, - P_SPI2_SSEL4, P_SPI2_SSEL5, - P_SPI2_SSEL6, P_SPI2_SSEL7}, -}; - -static int bfin_spi_setup(struct spi_device *spi) -{ - struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); - struct bfin_spi_device *chip = spi_get_ctldata(spi); - u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; - int ret = -EINVAL; - - if (!chip) { - struct bfin_spi3_chip *chip_info = spi->controller_data; - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, "can not allocate chip data\n"); - return -ENOMEM; - } - if (chip_info) { - if (chip_info->control & ~bfin_ctl_reg) { - dev_err(&spi->dev, - "do not set bits that the SPI framework manages\n"); - goto error; - } - chip->control = chip_info->control; - chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->tx_dummy_val = chip_info->tx_dummy_val; - chip->enable_dma = chip_info->enable_dma; - } - chip->cs = spi->chip_select; - if (chip->cs < MAX_CTRL_CS) { - chip->ssel = (1 << chip->cs) << 8; - ret = peripheral_request(ssel[spi->master->bus_num] - [chip->cs-1], dev_name(&spi->dev)); - if (ret) { - dev_err(&spi->dev, "peripheral_request() error\n"); - goto error; - } - } else { - chip->cs_gpio = chip->cs - MAX_CTRL_CS; - ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (ret) { - dev_err(&spi->dev, "gpio_request_one() error\n"); - goto error; - } - } - spi_set_ctldata(spi, chip); - } - - /* force a default base state */ - chip->control &= bfin_ctl_reg; - - if (spi->mode & SPI_CPOL) - chip->control |= SPI_CTL_CPOL; - if (spi->mode & SPI_CPHA) - chip->control |= SPI_CTL_CPHA; - if (spi->mode & SPI_LSB_FIRST) - chip->control |= SPI_CTL_LSBF; - chip->control |= SPI_CTL_MSTR; - /* we choose software to controll cs */ - chip->control &= 
~SPI_CTL_ASSEL; - - chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); - - bfin_spi_cs_enable(drv_data, chip); - bfin_spi_cs_deactive(drv_data, chip); - - return 0; -error: - if (chip) { - kfree(chip); - spi_set_ctldata(spi, NULL); - } - - return ret; -} - -static void bfin_spi_cleanup(struct spi_device *spi) -{ - struct bfin_spi_device *chip = spi_get_ctldata(spi); - struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); - - if (!chip) - return; - - if (chip->cs < MAX_CTRL_CS) { - peripheral_free(ssel[spi->master->bus_num] - [chip->cs-1]); - bfin_spi_cs_disable(drv_data, chip); - } else { - gpio_free(chip->cs_gpio); - } - - kfree(chip); - spi_set_ctldata(spi, NULL); -} - -static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id) -{ - struct bfin_spi_master *drv_data = dev_id; - u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); - - clear_dma_irqstat(drv_data->tx_dma); - if (dma_stat & DMA_DONE) { - drv_data->tx_num++; - } else { - dev_err(&drv_data->master->dev, - "spi tx dma error: %d\n", dma_stat); - if (drv_data->tx) - drv_data->state = ERROR_STATE; - } - bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF); - return IRQ_HANDLED; -} - -static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id) -{ - struct bfin_spi_master *drv_data = dev_id; - struct spi_message *msg = drv_data->cur_msg; - u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); - - clear_dma_irqstat(drv_data->rx_dma); - if (dma_stat & DMA_DONE) { - drv_data->rx_num++; - /* we may fail on tx dma */ - if (drv_data->state != ERROR_STATE) - msg->actual_length += drv_data->transfer_len; - } else { - drv_data->state = ERROR_STATE; - dev_err(&drv_data->master->dev, - "spi rx dma error: %d\n", dma_stat); - } - bfin_write(&drv_data->regs->tx_control, 0); - bfin_write(&drv_data->regs->rx_control, 0); - if (drv_data->rx_num != drv_data->tx_num) - dev_dbg(&drv_data->master->dev, - "dma interrupt missing: tx=%d,rx=%d\n", - drv_data->tx_num, drv_data->rx_num); - tasklet_schedule(&drv_data->pump_transfers); - return IRQ_HANDLED; -} - -static int bfin_spi_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct bfin_spi3_master *info = dev_get_platdata(dev); - struct spi_master *master; - struct bfin_spi_master *drv_data; - struct resource *mem, *res; - unsigned int tx_dma, rx_dma; - unsigned long sclk; - int ret; - - if (!info) { - dev_err(dev, "platform data missing!\n"); - return -ENODEV; - } - - sclk = get_sclk1(); - if (!sclk) { - dev_err(dev, "can not get sclk1\n"); - return -ENXIO; - } - - res = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (!res) { - dev_err(dev, "can not get tx dma resource\n"); - return -ENXIO; - } - tx_dma = res->start; - - res = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (!res) { - dev_err(dev, "can not get rx dma resource\n"); - return -ENXIO; - } - rx_dma = res->start; - - /* allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(*drv_data)); - if (!master) { - dev_err(dev, "can not alloc spi_master\n"); - return -ENOMEM; - } - platform_set_drvdata(pdev, master); - - /* the mode bits supported by this driver */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; - - master->bus_num = pdev->id; - master->num_chipselect = info->num_chipselect; - master->cleanup = bfin_spi_cleanup; - master->setup = bfin_spi_setup; - master->transfer_one_message = bfin_spi_transfer_one_message; - master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | - SPI_BPW_MASK(8); - - drv_data = 
spi_master_get_devdata(master); - drv_data->master = master; - drv_data->tx_dma = tx_dma; - drv_data->rx_dma = rx_dma; - drv_data->pin_req = info->pin_req; - drv_data->sclk = sclk; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - drv_data->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(drv_data->regs)) { - ret = PTR_ERR(drv_data->regs); - goto err_put_master; - } - - /* request tx and rx dma */ - ret = request_dma(tx_dma, "SPI_TX_DMA"); - if (ret) { - dev_err(dev, "can not request SPI TX DMA channel\n"); - goto err_put_master; - } - set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data); - - ret = request_dma(rx_dma, "SPI_RX_DMA"); - if (ret) { - dev_err(dev, "can not request SPI RX DMA channel\n"); - goto err_free_tx_dma; - } - set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data); - - /* request CLK, MOSI and MISO */ - ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3"); - if (ret < 0) { - dev_err(dev, "can not request spi pins\n"); - goto err_free_rx_dma; - } - - bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); - bfin_write(&drv_data->regs->ssel, 0x0000FE00); - bfin_write(&drv_data->regs->delay, 0x0); - - tasklet_init(&drv_data->pump_transfers, - bfin_spi_pump_transfers, (unsigned long)drv_data); - /* register with the SPI framework */ - ret = devm_spi_register_master(dev, master); - if (ret) { - dev_err(dev, "can not register spi master\n"); - goto err_free_peripheral; - } - - return ret; - -err_free_peripheral: - peripheral_free_list(drv_data->pin_req); -err_free_rx_dma: - free_dma(rx_dma); -err_free_tx_dma: - free_dma(tx_dma); -err_put_master: - spi_master_put(master); - - return ret; -} - -static int bfin_spi_remove(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct bfin_spi_master *drv_data = spi_master_get_devdata(master); - - bfin_spi_disable(drv_data); - - peripheral_free_list(drv_data->pin_req); - free_dma(drv_data->rx_dma); - free_dma(drv_data->tx_dma); - - return 0; -} - -#ifdef CONFIG_PM -static int bfin_spi_suspend(struct device *dev) -{ - struct spi_master *master = dev_get_drvdata(dev); - struct bfin_spi_master *drv_data = spi_master_get_devdata(master); - - spi_master_suspend(master); - - drv_data->control = bfin_read(&drv_data->regs->control); - drv_data->ssel = bfin_read(&drv_data->regs->ssel); - - bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); - bfin_write(&drv_data->regs->ssel, 0x0000FE00); - dma_disable_irq(drv_data->rx_dma); - dma_disable_irq(drv_data->tx_dma); - - return 0; -} - -static int bfin_spi_resume(struct device *dev) -{ - struct spi_master *master = dev_get_drvdata(dev); - struct bfin_spi_master *drv_data = spi_master_get_devdata(master); - int ret = 0; - - /* bootrom may modify spi and dma status when resume in spi boot mode */ - disable_dma(drv_data->rx_dma); - - dma_enable_irq(drv_data->rx_dma); - dma_enable_irq(drv_data->tx_dma); - bfin_write(&drv_data->regs->control, drv_data->control); - bfin_write(&drv_data->regs->ssel, drv_data->ssel); - - ret = spi_master_resume(master); - if (ret) { - free_dma(drv_data->rx_dma); - free_dma(drv_data->tx_dma); - } - - return ret; -} -#endif -static const struct dev_pm_ops bfin_spi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume) -}; - -MODULE_ALIAS("platform:bfin-spi3"); -static struct platform_driver bfin_spi_driver = { - .driver = { - .name = "bfin-spi3", - .owner = THIS_MODULE, - .pm = &bfin_spi_pm_ops, - }, - .remove = bfin_spi_remove, -}; - 
-module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe); - -MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); -MODULE_AUTHOR("Scott Jiang "); -MODULE_LICENSE("GPL v2"); diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h new file mode 100644 index 000000000000..c84123aa1d06 --- /dev/null +++ b/include/linux/spi/adi_spi3.h @@ -0,0 +1,254 @@ +/* + * Analog Devices SPI3 controller driver + * + * Copyright (c) 2014 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ADI_SPI3_H_ +#define _ADI_SPI3_H_ + +#include + +/* SPI_CONTROL */ +#define SPI_CTL_EN 0x00000001 /* Enable */ +#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */ +#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */ +#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */ +#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */ +#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */ +#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */ +#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */ +#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */ +#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */ +#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */ +#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */ +#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */ +#define SPI_CTL_LSBF 0x00001000 /* LSB First */ +#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */ +#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */ +#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */ +#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */ +#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */ +#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */ +#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */ +#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */ +#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */ +#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */ +#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */ +#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */ +#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */ +/* SPI_RX_CONTROL */ +#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */ +#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */ +#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */ +#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */ +#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */ +#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */ +#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */ +#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */ +#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */ +#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */ +#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */ +#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */ +#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */ +#define 
SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */ +#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */ +#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */ +#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */ +#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */ +#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */ +#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */ +#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */ +#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */ +/* SPI_TX_CONTROL */ +#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */ +#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */ +#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */ +#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */ +#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */ +#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */ +#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */ +#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */ +#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */ +#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */ +#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */ +#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */ +#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */ +#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */ +#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */ +#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */ +#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */ +#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */ +#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */ +#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */ +#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */ +#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */ +/* SPI_CLOCK */ +#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */ +/* SPI_DELAY */ +#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */ +#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */ +#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */ +/* SPI_SSEL */ +#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */ +#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */ +#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */ +#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */ +#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */ +#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */ +#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */ +#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */ +#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */ +#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */ +#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */ +#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */ +#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */ +#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */ +/* SPI_RWC */ +#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */ +/* SPI_RWCR */ +#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */ +/* SPI_TWC */ +#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */ +/* SPI_TWCR */ +#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */ +/* SPI_IMASK */ +#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ 
+#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ +#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_IMASKCL */ +#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ +#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_IMASKST */ +#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ +#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_STATUS */ +#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */ +#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */ +#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */ +#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */ +#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */ +#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */ +#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */ +#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */ +#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */ +#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */ +#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */ +#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */ +#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */ +#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */ +#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */ +#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */ +#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */ +#define 
SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */ +#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */ +#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */ +#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */ +#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */ +#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */ +#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */ +/* SPI_ILAT */ +#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ +#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ +#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */ +#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */ +#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */ +#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */ +#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */ +#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */ +/* SPI_ILATCL */ +#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ +#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ +#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */ +#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */ +#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */ +#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */ +#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */ +#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */ + +/* + * adi spi3 registers layout + */ +struct adi_spi_regs { + u32 revid; + u32 control; + u32 rx_control; + u32 tx_control; + u32 clock; + u32 delay; + u32 ssel; + u32 rwc; + u32 rwcr; + u32 twc; + u32 twcr; + u32 reserved0; + u32 emask; + u32 emaskcl; + u32 emaskst; + u32 reserved1; + u32 status; + u32 elat; + u32 elatcl; + u32 reserved2; + u32 rfifo; + u32 reserved3; + u32 tfifo; +}; + +#define MAX_CTRL_CS 8 /* cs in spi controller */ + +/* device.platform_data for SSP controller devices */ +struct adi_spi3_master { + u16 num_chipselect; + u16 pin_req[7]; +}; + +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... mostly for dma tuning + */ +struct adi_spi3_chip { + u32 control; + u16 cs_chg_udelay; /* Some devices require 16-bit delays */ + u32 tx_dummy_val; /* tx value for rx only transfer */ + bool enable_dma; +}; + +#endif /* _ADI_SPI3_H_ */ -- cgit v1.2.3 From e885cd805fc6e65ef5150a211c7bac02f925af04 Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Fri, 10 Jan 2014 14:26:06 -0500 Subject: efi: create memory map iteration helper There are a lot of places in the kernel which iterate through an EFI memory map. Most of these places use essentially the same for-loop code. This patch adds a for_each_efi_memory_desc() helper to clean up all of the existing duplicate code and avoid more in the future. 
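As a purely illustrative sketch (not part of the patch), the conversion the helper enables looks like this; the open-coded loop is the pattern most call sites use today, and the local variables p and md are placeholders:

	/* before: open-coded walk over the EFI memory map */
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		/* ... inspect md ... */
	}

	/* after: the same walk using the new helper */
	efi_memory_desc_t *md;

	for_each_efi_memory_desc(&memmap, md) {
		/* ... inspect md ... */
	}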
Signed-off-by: Mark Salter Signed-off-by: Leif Lindholm Signed-off-by: Matt Fleming --- include/linux/efi.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/efi.h b/include/linux/efi.h index 6c100ff0cae4..82d0abb2b19f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -863,6 +863,12 @@ extern int efi_set_rtc_mmss(const struct timespec *now); extern void efi_reserve_boot_services(void); extern struct efi_memory_map memmap; +/* Iterate through an efi_memory_map */ +#define for_each_efi_memory_desc(m, md) \ + for ((md) = (m)->map; \ + (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ + (md) = (void *)(md) + (m)->desc_size) + /** * efi_range_is_wc - check the WC bit on an address range * @start: starting kvirt address -- cgit v1.2.3 From 97f53d710b9f63cbef1c86ee39d9ecfdda6e674c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 10:09:07 +0200 Subject: regulator: s2mps11: Add external GPIO control for S2MPS14 Add support for external control over GPIO for the LDO10, LDO11 and LDO12 S2MPS14 regulators. External control can be turned on by writing 0x0 to the control register, which for the other regulators is used to disable them. These LDO10-LDO12 regulators can be disabled only by I2C, GPIO or the PWREN pin, so this patch provides a proper way of disabling them. Additionally, the GPIO control has two benefits: - It is faster than toggling the regulator over the I2C bus. - It allows disabling the regulator during suspend to RAM; the AP will enable it again during resume. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Mark Brown --- drivers/regulator/s2mps11.c | 67 +++++++++++++++++++++++++++++++++++-- include/linux/mfd/samsung/s2mps14.h | 2 ++ 2 files changed, 67 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 3aba0331fb5d..6dad0aa74a47 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,8 @@ struct s2mps11_info { * was enabled. 
*/ unsigned int s2mps14_suspend_state:30; + /* Array of size rdev_num with GPIO-s for external sleep control */ + int *ext_control_gpio; }; static int get_ramp_delay(int ramp_delay) @@ -409,6 +412,8 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) val = S2MPS14_ENABLE_SUSPEND; + else if (s2mps11->ext_control_gpio[rdev_get_id(rdev)]) + val = S2MPS14_ENABLE_EXT_CONTROL; else val = rdev->desc->enable_mask; @@ -565,8 +570,40 @@ static const struct regulator_desc s2mps14_regulators[] = { regulator_desc_s2mps14_buck1235(5), }; -static int s2mps11_pmic_dt_parse(struct platform_device *pdev, +static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11, + struct regulator_dev *rdev) +{ + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + rdev->desc->enable_mask, S2MPS14_ENABLE_EXT_CONTROL); +} + +static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev, struct of_regulator_match *rdata, struct s2mps11_info *s2mps11) +{ + int *gpio = s2mps11->ext_control_gpio; + unsigned int i; + unsigned int valid_regulators[3] = { S2MPS14_LDO10, S2MPS14_LDO11, + S2MPS14_LDO12 }; + + for (i = 0; i < ARRAY_SIZE(valid_regulators); i++) { + unsigned int reg = valid_regulators[i]; + + if (!rdata[reg].init_data || !rdata[reg].of_node) + continue; + + gpio[reg] = of_get_named_gpio(rdata[reg].of_node, + "samsung,ext-control-gpios", 0); + if (!gpio_is_valid(gpio[reg])) + gpio[reg] = 0; + else + dev_dbg(&pdev->dev, "Using GPIO %d for ext-control over %d/%s\n", + gpio[reg], reg, rdata[reg].name); + } +} + +static int s2mps11_pmic_dt_parse(struct platform_device *pdev, + struct of_regulator_match *rdata, struct s2mps11_info *s2mps11, + enum sec_device_type dev_type) { struct device_node *reg_np; @@ -577,6 +614,9 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev, } of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num); + if (dev_type == S2MPS14X) + s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11); + of_node_put(reg_np); return 0; @@ -613,6 +653,12 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) return -EINVAL; }; + s2mps11->ext_control_gpio = devm_kzalloc(&pdev->dev, + sizeof(*s2mps11->ext_control_gpio) * s2mps11->rdev_num, + GFP_KERNEL); + if (!s2mps11->ext_control_gpio) + return -ENOMEM; + if (!iodev->dev->of_node) { if (iodev->pdata) { pdata = iodev->pdata; @@ -631,7 +677,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) for (i = 0; i < s2mps11->rdev_num; i++) rdata[i].name = regulators[i].name; - ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11); + ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, dev_type); if (ret) goto out; @@ -652,6 +698,12 @@ common_reg: config.of_node = rdata[i].of_node; } + if (s2mps11->ext_control_gpio[i]) { + config.ena_gpio = s2mps11->ext_control_gpio[i]; + config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH; + } else + config.ena_gpio = config.ena_gpio_flags = 0; + regulator = devm_regulator_register(&pdev->dev, ®ulators[i], &config); if (IS_ERR(regulator)) { @@ -660,6 +712,17 @@ common_reg: i); goto out; } + + if (s2mps11->ext_control_gpio[i]) { + ret = s2mps14_pmic_enable_ext_control(s2mps11, + regulator); + if (ret < 0) { + dev_err(&pdev->dev, + "failed to enable GPIO control over %s: %d\n", + regulator->desc->name, ret); + goto out; + } + } } out: diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h index 4b449b8ac548..900cd7a04314 100644 --- 
a/include/linux/mfd/samsung/s2mps14.h +++ b/include/linux/mfd/samsung/s2mps14.h @@ -148,6 +148,8 @@ enum s2mps14_regulators { #define S2MPS14_ENABLE_SHIFT 6 /* On/Off controlled by PWREN */ #define S2MPS14_ENABLE_SUSPEND (0x01 << S2MPS14_ENABLE_SHIFT) +/* On/Off controlled by LDO10EN or EMMCEN */ +#define S2MPS14_ENABLE_EXT_CONTROL (0x00 << S2MPS14_ENABLE_SHIFT) #define S2MPS14_LDO_N_VOLTAGES (S2MPS14_LDO_VSEL_MASK + 1) #define S2MPS14_BUCK_N_VOLTAGES (S2MPS14_BUCK_VSEL_MASK + 1) -- cgit v1.2.3 From 73679e50820123ebdedc67ebcda4562d1d6e4aba Mon Sep 17 00:00:00 2001 From: Pranith Kumar Date: Tue, 15 Apr 2014 12:05:22 -0400 Subject: compiler-intel.h: Remove duplicate definition barrier is already defined as __memory_barrier in compiler.h Remove this unnecessary redefinition. Signed-off-by: Pranith Kumar Link: http://lkml.kernel.org/r/CAJhHMCAnYPy0%2BqD-1KBnJPLt3XgAjdR12j%2BySSnPgmZcpbE7HQ@mail.gmail.com Signed-off-by: H. Peter Anvin --- include/linux/compiler-intel.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 5529c5239421..ba147a1727e6 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -13,12 +13,9 @@ /* Intel ECC compiler doesn't support gcc specific asm stmts. * It uses intrinsics to do the equivalent things. */ -#undef barrier #undef RELOC_HIDE #undef OPTIMIZER_HIDE_VAR -#define barrier() __memory_barrier() - #define RELOC_HIDE(ptr, off) \ ({ unsigned long __ptr; \ __ptr = (unsigned long) (ptr); \ -- cgit v1.2.3 From b4f42e2831ff9b9fa19252265d7c8985d47eefb9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Apr 2014 09:46:28 -0600 Subject: block: remove struct request buffer member This was used in the olden days, back when onions were proper yellow. Basically it mapped to the current buffer to be transferred. With highmem being added more than a decade ago, most drivers map pages out of a bio, and rq->buffer isn't pointing at anything valid. Convert old style drivers to just use bio_data(). For the discard payload use case, just reference the page in the bio. 
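For illustration, the typical conversion in an old-style request_fn driver looks roughly like the following sketch, where dest and size stand in for driver-specific values:

	/* old: use the kernel address cached in the request */
	memcpy(dest, req->buffer, size);

	/* new: rq->buffer is gone; take the address of the current
	 * segment directly from the bio
	 */
	memcpy(dest, bio_data(req->bio), size);

For the discard payload case the page is reached through the bio as well (see the skd and sd hunks below), so nothing needs to be cached in the request itself.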
Signed-off-by: Jens Axboe --- block/blk-core.c | 21 ++++++--------------- block/blk-map.c | 3 --- drivers/block/amiflop.c | 2 +- drivers/block/ataflop.c | 2 +- drivers/block/floppy.c | 18 +++++++++--------- drivers/block/hd.c | 10 +++++----- drivers/block/mg_disk.c | 12 ++++++------ drivers/block/paride/pcd.c | 2 +- drivers/block/paride/pd.c | 4 ++-- drivers/block/paride/pf.c | 4 ++-- drivers/block/skd_main.c | 5 ++--- drivers/block/swim.c | 2 +- drivers/block/swim3.c | 6 +++--- drivers/block/xen-blkfront.c | 4 ++-- drivers/block/xsysace.c | 4 ++-- drivers/block/z2ram.c | 6 ++++-- drivers/ide/ide-disk.c | 5 ++--- drivers/md/dm.c | 1 - drivers/mtd/mtd_blkdevs.c | 3 +-- drivers/mtd/ubi/block.c | 2 +- drivers/scsi/scsi_lib.c | 3 --- drivers/scsi/sd.c | 10 ++++------ include/linux/blkdev.h | 1 - 23 files changed, 55 insertions(+), 75 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 1fe9ff6e6802..ae6227fd07aa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -146,8 +146,8 @@ void blk_dump_rq_flags(struct request *rq, char *msg) printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", - rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); + printk(KERN_INFO " bio %p, biotail %p, len %u\n", + rq->bio, rq->biotail, blk_rq_bytes(rq)); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); @@ -1360,7 +1360,6 @@ void blk_add_request_payload(struct request *rq, struct page *page, rq->__data_len = rq->resid_len = len; rq->nr_phys_segments = 1; - rq->buffer = bio_data(bio); } EXPORT_SYMBOL_GPL(blk_add_request_payload); @@ -1402,12 +1401,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; - /* - * may not be valid. if the low level driver said - * it didn't need a bounce buffer then it better - * not touch req->buffer either... - */ - req->buffer = bio_data(bio); req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -2434,7 +2427,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) } req->__data_len -= total_bytes; - req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ if (req->cmd_type == REQ_TYPE_FS) @@ -2752,10 +2744,9 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= bio->bi_rw & REQ_WRITE; - if (bio_has_data(bio)) { + if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); - rq->buffer = bio_data(bio); - } + rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; @@ -2831,7 +2822,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /* * Copy attributes of the original request to the clone request. - * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + * The actual data parts (e.g. ->cmd, ->sense) are not copied. */ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { @@ -2857,7 +2848,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. - * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * The actual data parts of @rq_src (e.g. 
->cmd, ->sense) * are not copied, and copying such parts is the caller's responsibility. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. diff --git a/block/blk-map.c b/block/blk-map.c index f7b22bc21518..f890d4345b0c 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - rq->buffer = NULL; return 0; unmap_rq: blk_rq_unmap_user(bio); @@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, } blk_queue_bounce(q, &rq->bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 748dea4f34dc..758da2287d9a 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1406,7 +1406,7 @@ next_segment: track = block / (floppy->dtype->sects * floppy->type->sect_mult); sector = block % (floppy->dtype->sects * floppy->type->sect_mult); - data = rq->buffer + 512 * cnt; + data = bio_data(rq->bio) + 512 * cnt; #ifdef DEBUG printk("access to track %d, sector %d, with buffer at " "0x%08lx\n", track, sector, data); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 96b629e1f0c9..7e8a55f8917c 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1484,7 +1484,7 @@ repeat: ReqCnt = 0; ReqCmd = rq_data_dir(fd_request); ReqBlock = blk_rq_pos(fd_request); - ReqBuffer = fd_request->buffer; + ReqBuffer = bio_data(fd_request->bio); setup_req_params( drive ); do_fd_action( drive ); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8f5565bf34cd..5f69c910c3ac 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 +2351,7 @@ static void rw_interrupt(void) } if (CT(COMMAND) != FD_READ || - raw_cmd->kernel_data == current_req->buffer) { + raw_cmd->kernel_data == bio_data(current_req->bio)) { /* transfer directly from buffer */ cont->done(1); } else if (CT(COMMAND) == FD_READ) { @@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void) raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); - } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { + } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; @@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void) */ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - - ((unsigned long)current_req->buffer)) >> 9; + ((unsigned long)bio_data(current_req->bio))) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ - if (CROSS_64KB(current_req->buffer, max_size << 9)) + if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) max_size = (K_64 - - ((unsigned long)current_req->buffer) % + ((unsigned long)bio_data(current_req->bio)) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* @@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void) (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { - raw_cmd->kernel_data = current_req->buffer; + raw_cmd->kernel_data = bio_data(current_req->bio); 
raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); @@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void) raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || - (raw_cmd->kernel_data != current_req->buffer && + (raw_cmd->kernel_data != bio_data(current_req->bio) && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || @@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void) raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); - if (raw_cmd->kernel_data != current_req->buffer) + if (raw_cmd->kernel_data != bio_data(current_req->bio)) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), @@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void) return 0; } - if (raw_cmd->kernel_data != current_req->buffer) { + if (raw_cmd->kernel_data != bio_data(current_req->bio)) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || diff --git a/drivers/block/hd.c b/drivers/block/hd.c index bf397bf108b7..8a290c08262f 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -464,11 +464,11 @@ static void read_intr(void) ok_to_read: req = hd_req; - insw(HD_DATA, req->buffer, 256); + insw(HD_DATA, bio_data(req->bio), 256); #ifdef DEBUG printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", req->rq_disk->disk_name, blk_rq_pos(req) + 1, - blk_rq_sectors(req) - 1, req->buffer+512); + blk_rq_sectors(req) - 1, bio_data(req->bio)+512); #endif if (hd_end_request(0, 512)) { SET_HANDLER(&read_intr); @@ -505,7 +505,7 @@ static void write_intr(void) ok_to_write: if (hd_end_request(0, 512)) { SET_HANDLER(&write_intr); - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); return; } @@ -624,7 +624,7 @@ repeat: printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", req->rq_disk->disk_name, req_data_dir(req) == READ ? 
"read" : "writ", - cyl, head, sec, nsect, req->buffer); + cyl, head, sec, nsect, bio_data(req->bio)); #endif if (req->cmd_type == REQ_TYPE_FS) { switch (rq_data_dir(req)) { @@ -643,7 +643,7 @@ repeat: bad_rw_intr(); goto repeat; } - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); break; default: printk("unknown hd-command\n"); diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index eb59b1241366..e352cac707e8 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host, static void mg_read_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -496,7 +496,7 @@ static void mg_read(struct request *req) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - blk_rq_sectors(req), blk_rq_pos(req), req->buffer); + blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); do { if (mg_wait(host, ATA_DRQ, @@ -514,7 +514,7 @@ static void mg_read(struct request *req) static void mg_write_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -534,7 +534,7 @@ static void mg_write(struct request *req) } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - rem, blk_rq_pos(req), req->buffer); + rem, blk_rq_pos(req), bio_data(req->bio)); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { @@ -585,7 +585,7 @@ ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); + blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); @@ -624,7 +624,7 @@ ok_to_write: /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req), req->buffer); + blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index e76bdc074dbe..719cb1bc1640 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q) pcd_current = cd; pcd_sector = blk_rq_pos(pcd_req); pcd_count = blk_rq_cur_sectors(pcd_req); - pcd_buf = pcd_req->buffer; + pcd_buf = bio_data(pcd_req->bio); pcd_busy = 1; ps_set_intr(do_pcd_read, NULL, 0, nice); return; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 19ad8f0c83ef..fea7e76a00de 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -454,7 +454,7 @@ static enum action do_pd_io_start(void) if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) return Fail; pd_run = blk_rq_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); pd_retries = 0; if (pd_cmd == READ) return do_pd_read_start(); @@ -485,7 +485,7 @@ static int pd_next_buf(void) spin_lock_irqsave(&pd_lock, saved_flags); __blk_end_request_cur(pd_req, 0); pd_count = blk_rq_cur_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); spin_unlock_irqrestore(&pd_lock, saved_flags); return 0; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 
f5c86d523ba0..9a15fd3c9349 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -795,7 +795,7 @@ repeat: } pf_cmd = rq_data_dir(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); pf_retries = 0; pf_busy = 1; @@ -827,7 +827,7 @@ static int pf_next_buf(void) if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); } return 0; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a69dd93d1bd5..36bcedfd930c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, req = skreq->req; blk_add_request_payload(req, page, len); - req->buffer = buf; } static void skd_request_fn_not_online(struct request_queue *q); @@ -856,10 +855,10 @@ static void skd_end_request(struct skd_device *skdev, if ((io_flags & REQ_DISCARD) && (skreq->discard_page == 1)) { + struct bio *bio = req->bio; pr_debug("%s:%s:%d, free the page!", skdev->name, __func__, __LINE__); - free_page((unsigned long)req->buffer); - req->buffer = NULL; + __free_page(bio->bi_io_vec->bv_page); } if (unlikely(error)) { diff --git a/drivers/block/swim.c b/drivers/block/swim.c index b02d53a399f3..6b44bbe528b7 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q) case READ: err = floppy_read_sectors(fs, blk_rq_pos(req), blk_rq_cur_sectors(req), - req->buffer); + bio_data(req->bio)); break; } done: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c74f7b56e7c4..523ee8fd4c15 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs) swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", req->rq_disk->disk_name, req->cmd, (long)blk_rq_pos(req), blk_rq_sectors(req), - req->buffer); + bio_data(req->bio)); swim3_dbg(" errors=%d current_nr_sectors=%u\n", req->errors, blk_rq_cur_sectors(req)); #endif @@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs) /* Set up 3 dma commands: write preamble, data, postamble */ init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); ++cp; - init_dma(cp, OUTPUT_MORE, req->buffer, 512); + init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); ++cp; init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); } else { - init_dma(cp, INPUT_LAST, req->buffer, n * 512); + init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index efe1b4761735..283a30e88287 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq) } pr_debug("do_blk_req %p: cmd %p, sec %lx, " - "(%u/%u) buffer:%p [%s]\n", + "(%u/%u) [%s]\n", req, req->cmd, (unsigned long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), - req->buffer, rq_data_dir(req) ? "write" : "read"); + rq_data_dir(req) ? 
"write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 1393b8871a28..ab3ea62e5dfc 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace) rq_data_dir(req)); ace->req = req; - ace->data_ptr = req->buffer; + ace->data_ptr = bio_data(req->bio); ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); @@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace) * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ - ace->data_ptr = ace->req->buffer; + ace->data_ptr = bio_data(ace->req->bio); ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 27de5046708a..968f9e52effa 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q) while (len) { unsigned long addr = start & Z2RAM_CHUNKMASK; unsigned long size = Z2RAM_CHUNKSIZE - addr; + void *buffer = bio_data(req->bio); + if (len < size) size = len; addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; if (rq_data_dir(req) == READ) - memcpy(req->buffer, (char *)addr, size); + memcpy(buffer, (char *)addr, size); else - memcpy((char *)addr, req->buffer, size); + memcpy((char *)addr, buffer, size); start += size; len -= size; } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 16f69be820c7..ee880382e3bc 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ledtrig_ide_activity(); - pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", + pr_debug("%s: %sing: block=%llu, sectors=%u\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", - (unsigned long long)block, blk_rq_sectors(rq), - (unsigned long)rq->buffer); + (unsigned long long)block, blk_rq_sectors(rq)); if (hwif->rw_disk) hwif->rw_disk(drive, rq); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 455e64916498..6a71bc7c9133 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq, clone->cmd = rq->cmd; clone->cmd_len = rq->cmd_len; clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0b2ccb68c0d0..4dbfaee9aa95 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; - - buf = req->buffer; + buf = bio_data(req->bio); if (req->cmd_type != REQ_TYPE_FS) return -EIO; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 7ff473c871a9..ee774ba3728d 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req) * flash access anyway. 
*/ mutex_lock(&dev->dev_mutex); - ret = ubiblock_read(dev, req->buffer, sec, len); + ret = ubiblock_read(dev, bio_data(req->bio), sec, len); mutex_unlock(&dev->dev_mutex); return ret; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0f3bddcb6b1a..3cc82d3dec78 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1018,8 +1018,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. @@ -1156,7 +1154,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index efcbcd182863..06d154d20faa 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -739,14 +739,11 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -843,8 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) struct scsi_cmnd *SCpnt = rq->special; if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; + struct bio *bio = rq->bio; + + __free_page(bio->bi_io_vec->bv_page); } if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 86a8df13a5fe..eb5e94803892 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -178,7 +178,6 @@ struct request { unsigned short ioprio; void *special; /* opaque pointer available for LLD use */ - char *buffer; /* kaddr of the current segment if available */ int tag; int errors; -- cgit v1.2.3 From e9b267d91f6ddbc694cb40aa962b0b2cec03971d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Apr 2014 13:59:10 -0600 Subject: blk-mq: add ->init_request and ->exit_request methods The current blk_mq_init_commands/blk_mq_free_commands interface has two problems: 1) Because only the constructor is passed to blk_mq_init_commands there is no easy way to clean up when a command initialization fails. The current code simply leaks the allocations done in the constructor. 2) There is no good place to call blk_mq_free_commands: before blk_cleanup_queue there is no guarantee that all outstanding commands have completed, so we can't free them yet. After blk_cleanup_queue the queue has usually been freed. This can be worked around by grabbing an unconditional reference before calling blk_cleanup_queue and dropping it after blk_mq_free_commands is done, although that's not exactly pretty and driver writers are guaranteed to get it wrong sooner or later. Both issues are easily fixed by making the request constructor and destructor normal blk_mq_ops methods. 
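As a rough sketch of the new interface (modeled on the virtio_blk conversion in this patch; the mydrv names, the scatterlist payload and its size are made up for illustration), a driver now supplies its constructor directly in blk_mq_ops and sizes the per-request payload via reg->cmd_size:

	struct mydrv_cmd {
		struct scatterlist sg[16];	/* per-request driver payload */
	};

	static int mydrv_init_request(void *data, struct blk_mq_hw_ctx *hctx,
				      struct request *rq, unsigned int nr)
	{
		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

		/* one-time setup, done while the request map is allocated */
		sg_init_table(cmd->sg, ARRAY_SIZE(cmd->sg));
		return 0;
	}

	static struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,
		.map_queue	= blk_mq_map_queue,
		.alloc_hctx	= blk_mq_alloc_single_hw_queue,
		.free_hctx	= blk_mq_free_single_hw_queue,
		.init_request	= mydrv_init_request,
		/* .exit_request would undo init_request if teardown is needed */
	};

If init_request fails for any request, the core now tears the request map back down and calls exit_request for the affected requests, so the constructor's allocations no longer leak.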
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 105 ++++++++++++++------------------------------- drivers/block/virtio_blk.c | 23 +++++----- include/linux/blk-mq.h | 14 +++++- 3 files changed, 55 insertions(+), 87 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index e644feec068c..48d2d8495f5e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1031,74 +1031,20 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_put_ctx(ctx); } -static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) +static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx, void *driver_data) { - unsigned int i; - int ret = 0; - - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; - - ret = init(data, hctx, rq, i); - if (ret) - break; - } - - return ret; -} - -int blk_mq_init_commands(struct request_queue *q, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - int ret = 0; - - queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_init_hw_commands(hctx, init, data); - if (ret) - break; - } - - return ret; -} -EXPORT_SYMBOL(blk_mq_init_commands); - -static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - unsigned int i; + struct page *page; - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; + if (hctx->rqs && hctx->queue->mq_ops->exit_request) { + int i; - free(data, hctx, rq, i); + for (i = 0; i < hctx->queue_depth; i++) { + if (!hctx->rqs[i]) + continue; + hctx->queue->mq_ops->exit_request(driver_data, hctx, + hctx->rqs[i], i); + } } -} - -void blk_mq_free_commands(struct request_queue *q, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_free_hw_commands(hctx, free, data); -} -EXPORT_SYMBOL(blk_mq_free_commands); - -static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx) -{ - struct page *page; while (!list_empty(&hctx->page_list)) { page = list_first_entry(&hctx->page_list, struct page, lru); @@ -1123,10 +1069,12 @@ static size_t order_to_size(unsigned int order) } static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, - unsigned int reserved_tags, int node) + struct blk_mq_reg *reg, void *driver_data, int node) { + unsigned int reserved_tags = reg->reserved_tags; unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; + int error; INIT_LIST_HEAD(&hctx->page_list); @@ -1175,14 +1123,23 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, for (j = 0; j < to_do; j++) { hctx->rqs[i] = p; blk_rq_init(hctx->queue, hctx->rqs[i]); + if (reg->ops->init_request) { + error = reg->ops->init_request(driver_data, + hctx, hctx->rqs[i], i); + if (error) + goto err_rq_map; + } + p += rq_size; i++; } } - if (i < (reserved_tags + BLK_MQ_TAG_MIN)) + if (i < (reserved_tags + BLK_MQ_TAG_MIN)) { + error = -ENOMEM; goto err_rq_map; - else if (i != hctx->queue_depth) { + } + if (i != hctx->queue_depth) { hctx->queue_depth = i; pr_warn("%s: queue depth set to %u because of low memory\n", __func__, i); @@ -1190,12 +1147,14 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, hctx->tags = 
blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); if (!hctx->tags) { -err_rq_map: - blk_mq_free_rq_map(hctx); - return -ENOMEM; + error = -ENOMEM; + goto err_rq_map; } return 0; +err_rq_map: + blk_mq_free_rq_map(hctx, driver_data); + return error; } static int blk_mq_init_hw_queues(struct request_queue *q, @@ -1228,7 +1187,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, blk_mq_hctx_notify, hctx); blk_mq_register_cpu_notifier(&hctx->cpu_notifier); - if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node)) + if (blk_mq_init_rq_map(hctx, reg, driver_data, node)) break; /* @@ -1268,7 +1227,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, reg->ops->exit_hctx(hctx, j); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - blk_mq_free_rq_map(hctx); + blk_mq_free_rq_map(hctx, driver_data); kfree(hctx->ctxs); } @@ -1455,7 +1414,7 @@ void blk_mq_free_queue(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); - blk_mq_free_rq_map(hctx); + blk_mq_free_rq_map(hctx, q->queuedata); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c7d02bc9d945..d06206abd340 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -480,11 +480,22 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); +static int virtblk_init_request(void *data, struct blk_mq_hw_ctx *hctx, + struct request *rq, unsigned int nr) +{ + struct virtio_blk *vblk = data; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); + + sg_init_table(vbr->sg, vblk->sg_elems); + return 0; +} + static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .map_queue = blk_mq_map_queue, .alloc_hctx = blk_mq_alloc_single_hw_queue, .free_hctx = blk_mq_free_single_hw_queue, + .init_request = virtblk_init_request, .complete = virtblk_request_done, }; @@ -497,16 +508,6 @@ static struct blk_mq_reg virtio_mq_reg = { }; module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); -static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) -{ - struct virtio_blk *vblk = data; - struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); - - sg_init_table(vbr->sg, vblk->sg_elems); - return 0; -} - static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -577,8 +578,6 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_put_disk; } - blk_mq_init_commands(q, virtblk_init_vbr, vblk); - q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b6ee48740458..29c1a6e83814 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -67,6 +67,10 @@ typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int); +typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int); struct blk_mq_ops { /* @@ -99,6 +103,14 @@ struct blk_mq_ops { */ init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; + + /* + 
* Called for every command allocated by the block layer to allow + * the driver to set up driver specific data. + * Ditto for exit/teardown. + */ + init_request_fn *init_request; + exit_request_fn *exit_request; }; enum { @@ -118,8 +130,6 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); -int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); -void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -- cgit v1.2.3 From 24d2f90309b23f2cfe016b2aebc5f0d6e01c57fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Apr 2014 14:14:00 -0600 Subject: blk-mq: split out tag initialization, support shared tags Add a new blk_mq_tag_set structure that gets set up before we initialize the queue. A single blk_mq_tag_set structure can be shared by multiple queues. Signed-off-by: Christoph Hellwig Modular export of blk_mq_{alloc,free}_tagset added by me. Signed-off-by: Jens Axboe --- block/blk-mq-cpumap.c | 6 +- block/blk-mq-tag.c | 14 --- block/blk-mq-tag.h | 19 +++- block/blk-mq.c | 244 +++++++++++++++++++++++++-------------------- block/blk-mq.h | 5 +- drivers/block/null_blk.c | 92 ++++++++++------- drivers/block/virtio_blk.c | 48 +++++---- include/linux/blk-mq.h | 34 +++---- 8 files changed, 262 insertions(+), 200 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 097921329619..5d0f93cf358c 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -80,17 +80,17 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) return 0; } -unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) +unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) { unsigned int *map; /* If cpus are offline, map them to first hctx */ map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, - reg->numa_node); + set->numa_node); if (!map) return NULL; - if (!blk_mq_update_queue_map(map, reg->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) return map; kfree(map); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 83ae96c51a27..7a799c46c32d 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,25 +1,11 @@ #include #include -#include #include #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" -/* - * Per tagged queue (tag address space) map - */ -struct blk_mq_tags { - unsigned int nr_tags; - unsigned int nr_reserved_tags; - unsigned int nr_batch_move; - unsigned int nr_max_cache; - - struct percpu_ida free_tags; - struct percpu_ida reserved_tags; -}; - void blk_mq_wait_for_tags(struct blk_mq_tags *tags) { int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 947ba2c6148e..b602e3fa66ea 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,7 +1,24 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -struct blk_mq_tags; +#include + +/* + * Tag address space map. 
+ */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int nr_batch_move; + unsigned int nr_max_cache; + + struct percpu_ida free_tags; + struct percpu_ida reserved_tags; + + struct request **rqs; + struct list_head page_list; +}; + extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); diff --git a/block/blk-mq.c b/block/blk-mq.c index 2a5a0fed10a3..9180052d42cc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -81,7 +81,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { - rq = hctx->rqs[tag]; + rq = hctx->tags->rqs[tag]; blk_rq_init(hctx->queue, rq); rq->tag = tag; @@ -404,6 +404,12 @@ static void blk_mq_requeue_request(struct request *rq) rq->nr_phys_segments--; } +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +{ + return tags->rqs[tag]; +} +EXPORT_SYMBOL(blk_mq_tag_to_rq); + struct blk_mq_timeout_data { struct blk_mq_hw_ctx *hctx; unsigned long *next; @@ -425,12 +431,13 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) do { struct request *rq; - tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag); - if (tag >= hctx->queue_depth) + tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag); + if (tag >= hctx->tags->nr_tags) break; - rq = hctx->rqs[tag++]; - + rq = blk_mq_tag_to_rq(hctx->tags, tag++); + if (rq->q != hctx->queue) + continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) continue; @@ -969,11 +976,11 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) } EXPORT_SYMBOL(blk_mq_map_queue); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg, +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, unsigned int hctx_index) { return kmalloc_node(sizeof(struct blk_mq_hw_ctx), - GFP_KERNEL | __GFP_ZERO, reg->numa_node); + GFP_KERNEL | __GFP_ZERO, set->numa_node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); @@ -1030,31 +1037,31 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_put_ctx(ctx); } -static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx, void *driver_data) +static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, unsigned int hctx_idx) { struct page *page; - if (hctx->rqs && hctx->queue->mq_ops->exit_request) { + if (tags->rqs && set->ops->exit_request) { int i; - for (i = 0; i < hctx->queue_depth; i++) { - if (!hctx->rqs[i]) + for (i = 0; i < tags->nr_tags; i++) { + if (!tags->rqs[i]) continue; - hctx->queue->mq_ops->exit_request(driver_data, hctx, - hctx->rqs[i], i); + set->ops->exit_request(set->driver_data, tags->rqs[i], + hctx_idx, i); } } - while (!list_empty(&hctx->page_list)) { - page = list_first_entry(&hctx->page_list, struct page, lru); + while (!list_empty(&tags->page_list)) { + page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); __free_pages(page, page->private); } - kfree(hctx->rqs); + kfree(tags->rqs); - if (hctx->tags) - blk_mq_free_tags(hctx->tags); + blk_mq_free_tags(tags); } static size_t order_to_size(unsigned int order) @@ -1067,30 +1074,36 @@ static size_t order_to_size(unsigned int order) return ret; } -static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, - struct blk_mq_reg *reg, void *driver_data, int node) +static struct blk_mq_tags 
*blk_mq_init_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx) { - unsigned int reserved_tags = reg->reserved_tags; + struct blk_mq_tags *tags; unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; - int error; - INIT_LIST_HEAD(&hctx->page_list); + tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, + set->numa_node); + if (!tags) + return NULL; - hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *), - GFP_KERNEL, node); - if (!hctx->rqs) - return -ENOMEM; + INIT_LIST_HEAD(&tags->page_list); + + tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *), + GFP_KERNEL, set->numa_node); + if (!tags->rqs) { + blk_mq_free_tags(tags); + return NULL; + } /* * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ - rq_size = round_up(sizeof(struct request) + hctx->cmd_size, + rq_size = round_up(sizeof(struct request) + set->cmd_size, cache_line_size()); - left = rq_size * hctx->queue_depth; + left = rq_size * set->queue_depth; - for (i = 0; i < hctx->queue_depth;) { + for (i = 0; i < set->queue_depth; ) { int this_order = max_order; struct page *page; int to_do; @@ -1100,7 +1113,8 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, this_order--; do { - page = alloc_pages_node(node, GFP_KERNEL, this_order); + page = alloc_pages_node(set->numa_node, GFP_KERNEL, + this_order); if (page) break; if (!this_order--) @@ -1110,22 +1124,22 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, } while (1); if (!page) - break; + goto fail; page->private = this_order; - list_add_tail(&page->lru, &hctx->page_list); + list_add_tail(&page->lru, &tags->page_list); p = page_address(page); entries_per_page = order_to_size(this_order) / rq_size; - to_do = min(entries_per_page, hctx->queue_depth - i); + to_do = min(entries_per_page, set->queue_depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { - hctx->rqs[i] = p; - if (reg->ops->init_request) { - error = reg->ops->init_request(driver_data, - hctx, hctx->rqs[i], i); - if (error) - goto err_rq_map; + tags->rqs[i] = p; + if (set->ops->init_request) { + if (set->ops->init_request(set->driver_data, + tags->rqs[i], hctx_idx, i, + set->numa_node)) + goto fail; } p += rq_size; @@ -1133,30 +1147,16 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, } } - if (i < (reserved_tags + BLK_MQ_TAG_MIN)) { - error = -ENOMEM; - goto err_rq_map; - } - if (i != hctx->queue_depth) { - hctx->queue_depth = i; - pr_warn("%s: queue depth set to %u because of low memory\n", - __func__, i); - } + return tags; - hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); - if (!hctx->tags) { - error = -ENOMEM; - goto err_rq_map; - } - - return 0; -err_rq_map: - blk_mq_free_rq_map(hctx, driver_data); - return error; +fail: + pr_warn("%s: failed to allocate requests\n", __func__); + blk_mq_free_rq_map(set, tags, hctx_idx); + return NULL; } static int blk_mq_init_hw_queues(struct request_queue *q, - struct blk_mq_reg *reg, void *driver_data) + struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; unsigned int i, j; @@ -1170,23 +1170,21 @@ static int blk_mq_init_hw_queues(struct request_queue *q, node = hctx->numa_node; if (node == NUMA_NO_NODE) - node = hctx->numa_node = reg->numa_node; + node = hctx->numa_node = set->numa_node; INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->queue_num = i; - hctx->flags = reg->flags; - hctx->queue_depth = 
reg->queue_depth; - hctx->cmd_size = reg->cmd_size; + hctx->flags = set->flags; + hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); blk_mq_register_cpu_notifier(&hctx->cpu_notifier); - if (blk_mq_init_rq_map(hctx, reg, driver_data, node)) - break; + hctx->tags = set->tags[i]; /* * Allocate space for all possible cpus to avoid allocation in @@ -1206,8 +1204,8 @@ static int blk_mq_init_hw_queues(struct request_queue *q, hctx->nr_ctx_map = num_maps; hctx->nr_ctx = 0; - if (reg->ops->init_hctx && - reg->ops->init_hctx(hctx, driver_data, i)) + if (set->ops->init_hctx && + set->ops->init_hctx(hctx, set->driver_data, i)) break; } @@ -1221,11 +1219,10 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (i == j) break; - if (reg->ops->exit_hctx) - reg->ops->exit_hctx(hctx, j); + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, j); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - blk_mq_free_rq_map(hctx, driver_data); kfree(hctx->ctxs); } @@ -1290,41 +1287,25 @@ static void blk_mq_map_swqueue(struct request_queue *q) } } -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, - void *driver_data) +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; struct blk_mq_ctx *ctx; struct request_queue *q; int i; - if (!reg->nr_hw_queues || - !reg->ops->queue_rq || !reg->ops->map_queue || - !reg->ops->alloc_hctx || !reg->ops->free_hctx) - return ERR_PTR(-EINVAL); - - if (!reg->queue_depth) - reg->queue_depth = BLK_MQ_MAX_DEPTH; - else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) { - pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth); - reg->queue_depth = BLK_MQ_MAX_DEPTH; - } - - if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) - return ERR_PTR(-EINVAL); - ctx = alloc_percpu(struct blk_mq_ctx); if (!ctx) return ERR_PTR(-ENOMEM); - hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, - reg->numa_node); + hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, + set->numa_node); if (!hctxs) goto err_percpu; - for (i = 0; i < reg->nr_hw_queues; i++) { - hctxs[i] = reg->ops->alloc_hctx(reg, i); + for (i = 0; i < set->nr_hw_queues; i++) { + hctxs[i] = set->ops->alloc_hctx(set, i); if (!hctxs[i]) goto err_hctxs; @@ -1335,11 +1316,11 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, hctxs[i]->queue_num = i; } - q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node); + q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); if (!q) goto err_hctxs; - q->mq_map = blk_mq_make_queue_map(reg); + q->mq_map = blk_mq_make_queue_map(set); if (!q->mq_map) goto err_map; @@ -1347,33 +1328,34 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, blk_queue_rq_timeout(q, 30000); q->nr_queues = nr_cpu_ids; - q->nr_hw_queues = reg->nr_hw_queues; + q->nr_hw_queues = set->nr_hw_queues; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; - q->mq_ops = reg->ops; + q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; q->sg_reserved_size = INT_MAX; blk_queue_make_request(q, blk_mq_make_request); - blk_queue_rq_timed_out(q, reg->ops->timeout); - if (reg->timeout) - blk_queue_rq_timeout(q, reg->timeout); + blk_queue_rq_timed_out(q, set->ops->timeout); + if (set->timeout) + blk_queue_rq_timeout(q, set->timeout); - if (reg->ops->complete) - blk_queue_softirq_done(q, reg->ops->complete); + if (set->ops->complete) + blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_flush(q); - blk_mq_init_cpu_queues(q, reg->nr_hw_queues); + 
blk_mq_init_cpu_queues(q, set->nr_hw_queues); - q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, - cache_line_size()), GFP_KERNEL); + q->flush_rq = kzalloc(round_up(sizeof(struct request) + + set->cmd_size, cache_line_size()), + GFP_KERNEL); if (!q->flush_rq) goto err_hw; - if (blk_mq_init_hw_queues(q, reg, driver_data)) + if (blk_mq_init_hw_queues(q, set)) goto err_flush_rq; blk_mq_map_swqueue(q); @@ -1391,11 +1373,11 @@ err_hw: err_map: blk_cleanup_queue(q); err_hctxs: - for (i = 0; i < reg->nr_hw_queues; i++) { + for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); - reg->ops->free_hctx(hctxs[i], i); + set->ops->free_hctx(hctxs[i], i); } kfree(hctxs); err_percpu: @@ -1412,7 +1394,6 @@ void blk_mq_free_queue(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); - blk_mq_free_rq_map(hctx, q->queuedata); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); @@ -1473,6 +1454,53 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, return NOTIFY_OK; } +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + if (!set->nr_hw_queues) + return -EINVAL; + if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH) + return -EINVAL; + if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) + return -EINVAL; + + if (!set->nr_hw_queues || + !set->ops->queue_rq || !set->ops->map_queue || + !set->ops->alloc_hctx || !set->ops->free_hctx) + return -EINVAL; + + + set->tags = kmalloc_node(set->nr_hw_queues * sizeof(struct blk_mq_tags), + GFP_KERNEL, set->numa_node); + if (!set->tags) + goto out; + + for (i = 0; i < set->nr_hw_queues; i++) { + set->tags[i] = blk_mq_init_rq_map(set, i); + if (!set->tags[i]) + goto out_unwind; + } + + return 0; + +out_unwind: + while (--i >= 0) + blk_mq_free_rq_map(set, set->tags[i], i); +out: + return -ENOMEM; +} +EXPORT_SYMBOL(blk_mq_alloc_tag_set); + +void blk_mq_free_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + for (i = 0; i < set->nr_hw_queues; i++) + blk_mq_free_rq_map(set, set->tags[i], i); +} +EXPORT_SYMBOL(blk_mq_free_tag_set); + void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 7964dadb7d64..5fa14f19f752 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -1,6 +1,8 @@ #ifndef INT_BLK_MQ_H #define INT_BLK_MQ_H +struct blk_mq_tag_set; + struct blk_mq_ctx { struct { spinlock_t lock; @@ -46,8 +48,7 @@ void blk_mq_disable_hotplug(void); /* * CPU -> queue mappings */ -struct blk_mq_reg; -extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg); +extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); void blk_mq_add_timer(struct request *rq); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 71df69d90900..8e7e3a0b0d24 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -32,6 +32,7 @@ struct nullb { unsigned int index; struct request_queue *q; struct gendisk *disk; + struct blk_mq_tag_set tag_set; struct hrtimer timer; unsigned int queue_depth; spinlock_t lock; @@ -320,10 +321,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) +static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, + 
unsigned int hctx_index) { - int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); - int tip = (reg->nr_hw_queues % nr_online_nodes); + int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes); + int tip = (set->nr_hw_queues % nr_online_nodes); int node = 0, i, n; /* @@ -338,7 +340,7 @@ static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned in tip--; if (!tip) - b_size = reg->nr_hw_queues / nr_online_nodes; + b_size = set->nr_hw_queues / nr_online_nodes; } } @@ -387,13 +389,17 @@ static struct blk_mq_ops null_mq_ops = { .map_queue = blk_mq_map_queue, .init_hctx = null_init_hctx, .complete = null_softirq_done_fn, + .alloc_hctx = blk_mq_alloc_single_hw_queue, + .free_hctx = blk_mq_free_single_hw_queue, }; -static struct blk_mq_reg null_mq_reg = { - .ops = &null_mq_ops, - .queue_depth = 64, - .cmd_size = sizeof(struct nullb_cmd), - .flags = BLK_MQ_F_SHOULD_MERGE, +static struct blk_mq_ops null_mq_ops_pernode = { + .queue_rq = null_queue_rq, + .map_queue = blk_mq_map_queue, + .init_hctx = null_init_hctx, + .complete = null_softirq_done_fn, + .alloc_hctx = null_alloc_hctx, + .free_hctx = null_free_hctx, }; static void null_del_dev(struct nullb *nullb) @@ -402,6 +408,8 @@ static void null_del_dev(struct nullb *nullb) del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); kfree(nullb); } @@ -506,7 +514,7 @@ static int null_add_dev(void) nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) - return -ENOMEM; + goto out; spin_lock_init(&nullb->lock); @@ -514,49 +522,47 @@ static int null_add_dev(void) submit_queues = nr_online_nodes; if (setup_queues(nullb)) - goto err; + goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - null_mq_reg.numa_node = home_node; - null_mq_reg.queue_depth = hw_queue_depth; - null_mq_reg.nr_hw_queues = submit_queues; - - if (use_per_node_hctx) { - null_mq_reg.ops->alloc_hctx = null_alloc_hctx; - null_mq_reg.ops->free_hctx = null_free_hctx; - } else { - null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; - null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - } - - nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + if (use_per_node_hctx) + nullb->tag_set.ops = &null_mq_ops_pernode; + else + nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.nr_hw_queues = submit_queues; + nullb->tag_set.queue_depth = hw_queue_depth; + nullb->tag_set.numa_node = home_node; + nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); + nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + nullb->tag_set.driver_data = nullb; + + if (blk_mq_alloc_tag_set(&nullb->tag_set)) + goto out_cleanup_queues; + + nullb->q = blk_mq_init_queue(&nullb->tag_set); + if (!nullb->q) + goto out_cleanup_tags; } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_make_request(nullb->q, null_queue_bio); init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_prep_rq(nullb->q, null_rq_prep_fn); - if (nullb->q) - blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + blk_queue_softirq_done(nullb->q, null_softirq_done_fn); init_driver_queues(nullb); } - if (!nullb->q) - goto queue_fail; - nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { -queue_fail: - 
blk_cleanup_queue(nullb->q); - cleanup_queues(nullb); -err: - kfree(nullb); - return -ENOMEM; - } + if (!disk) + goto out_cleanup_blk_queue; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -579,6 +585,18 @@ err: sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; + +out_cleanup_blk_queue: + blk_cleanup_queue(nullb->q); +out_cleanup_tags: + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); +out_cleanup_queues: + cleanup_queues(nullb); +out_free_nullb: + kfree(nullb); +out: + return -ENOMEM; } static int __init null_init(void) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index d06206abd340..f909a8821e65 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -30,6 +30,9 @@ struct virtio_blk /* The disk structure for the kernel. */ struct gendisk *disk; + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + /* Process context for config space updates */ struct work_struct config_work; @@ -480,8 +483,9 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); -static int virtblk_init_request(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) +static int virtblk_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) { struct virtio_blk *vblk = data; struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); @@ -495,18 +499,12 @@ static struct blk_mq_ops virtio_mq_ops = { .map_queue = blk_mq_map_queue, .alloc_hctx = blk_mq_alloc_single_hw_queue, .free_hctx = blk_mq_free_single_hw_queue, - .init_request = virtblk_init_request, .complete = virtblk_request_done, + .init_request = virtblk_init_request, }; -static struct blk_mq_reg virtio_mq_reg = { - .ops = &virtio_mq_ops, - .nr_hw_queues = 1, - .queue_depth = 0, /* Set in virtblk_probe */ - .numa_node = NUMA_NO_NODE, - .flags = BLK_MQ_F_SHOULD_MERGE, -}; -module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); +static unsigned int virtblk_queue_depth; +module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); static int virtblk_probe(struct virtio_device *vdev) { @@ -562,20 +560,32 @@ static int virtblk_probe(struct virtio_device *vdev) } /* Default queue sizing is to fill the ring. */ - if (!virtio_mq_reg.queue_depth) { - virtio_mq_reg.queue_depth = vblk->vq->num_free; + if (!virtblk_queue_depth) { + virtblk_queue_depth = vblk->vq->num_free; /* ... 
but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - virtio_mq_reg.queue_depth /= 2; + virtblk_queue_depth /= 2; } - virtio_mq_reg.cmd_size = + + memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); + vblk->tag_set.ops = &virtio_mq_ops; + vblk->tag_set.nr_hw_queues = 1; + vblk->tag_set.queue_depth = virtblk_queue_depth; + vblk->tag_set.numa_node = NUMA_NO_NODE; + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + vblk->tag_set.cmd_size = sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; + vblk->tag_set.driver_data = vblk; - q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); + err = blk_mq_alloc_tag_set(&vblk->tag_set); + if (err) + goto out_put_disk; + + q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); if (!q) { err = -ENOMEM; - goto out_put_disk; + goto out_free_tags; } q->queuedata = vblk; @@ -678,6 +688,8 @@ static int virtblk_probe(struct virtio_device *vdev) out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); +out_free_tags: + blk_mq_free_tag_set(&vblk->tag_set); out_put_disk: put_disk(vblk->disk); out_free_vq: @@ -704,6 +716,8 @@ static void virtblk_remove(struct virtio_device *vdev) del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); + blk_mq_free_tag_set(&vblk->tag_set); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 29c1a6e83814..a4ea0ce83b07 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,8 +33,6 @@ struct blk_mq_hw_ctx { unsigned int nr_ctx_map; unsigned long *ctx_map; - struct request **rqs; - struct list_head page_list; struct blk_mq_tags *tags; unsigned long queued; @@ -42,7 +40,6 @@ struct blk_mq_hw_ctx { #define BLK_MQ_MAX_DISPATCH_ORDER 10 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; - unsigned int queue_depth; unsigned int numa_node; unsigned int cmd_size; /* per-request extra data */ @@ -50,7 +47,7 @@ struct blk_mq_hw_ctx { struct kobject kobj; }; -struct blk_mq_reg { +struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; unsigned int queue_depth; @@ -59,18 +56,22 @@ struct blk_mq_reg { int numa_node; unsigned int timeout; unsigned int flags; /* BLK_MQ_F_* */ + void *driver_data; + + struct blk_mq_tags **tags; }; typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); -typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); +typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, + unsigned int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); -typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int); -typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int); +typedef int (init_request_fn)(void *, struct request *, unsigned int, + unsigned int, unsigned int); +typedef void (exit_request_fn)(void *, struct request *, unsigned int, + unsigned int); struct blk_mq_ops { /* @@ -127,10 +128,13 @@ enum { BLK_MQ_MAX_DEPTH = 2048, }; -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct 
gendisk *); +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +void blk_mq_free_tag_set(struct blk_mq_tag_set *set); + void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_insert_request(struct request *, bool, bool, bool); @@ -139,10 +143,10 @@ void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); bool blk_mq_end_io_partial(struct request *rq, int error, @@ -173,12 +177,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) return (void *) rq + sizeof(*rq); } -static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, - unsigned int tag) -{ - return hctx->rqs[tag]; -} - #define queue_for_each_hw_ctx(q, hctx, i) \ for ((i) = 0; (i) < (q)->nr_hw_queues && \ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) -- cgit v1.2.3 From fb3ccb5da71273e7f0d50b50bc879e50cedd60e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Apr 2014 10:30:12 +0200 Subject: block: all blk-mq requests are tagged Instead of setting the REQ_QUEUED flag on each of them just take it into account in the only macro checking it. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index eb5e94803892..95bb551273ab 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1101,7 +1101,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) +#define blk_rq_tagged(rq) \ + ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED)) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); -- cgit v1.2.3 From 81b3b2711072b6047d5f332cd8751a1c5c9a3fb2 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 28 Jan 2014 12:36:48 +0100 Subject: clocksource: sh_cmt: Add support for multiple channels per device CMT hardware devices can support multiple channels, with global registers and per-channel registers. The sh_cmt driver currently models the hardware with one Linux device per channel. This model makes it difficult to handle global registers in a clean way. Add support for a new model that uses one Linux device per timer with multiple channels per device. This requires changes to platform data, add new channel configuration fields. Support for the legacy model is kept and will be removed after all platforms switch to the new model. 
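[Editorial illustration, not part of the patch: a hedged sketch of how a board file might describe one CMT device with several channels under the new model. Only the "sh-cmt-48-gen2" platform device ID and the channels_mask field come from this series; the base address, IRQ numbers and chosen channel mask are invented for the example.]

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/sh_timer.h>

/* Hypothetical board code: one 48-bit gen2 CMT exposing two channels. */
static struct sh_timer_config cmt0_platform_data = {
	.channels_mask	= 0x60,			/* hardware channels 5 and 6 (example) */
};

static struct resource cmt0_resources[] = {
	DEFINE_RES_MEM(0xffca0000, 0x1004),	/* register block, example address */
	DEFINE_RES_IRQ(120),			/* IRQ for channel index 0 (example) */
	DEFINE_RES_IRQ(121),			/* IRQ for channel index 1 (example) */
};

static struct platform_device cmt0_device = {
	.name		= "sh-cmt-48-gen2",
	.id		= 0,
	.dev		= {
		.platform_data	= &cmt0_platform_data,
	},
	.resource	= cmt0_resources,
	.num_resources	= ARRAY_SIZE(cmt0_resources),
};

The driver below maps the single memory resource once and derives each channel's register block from its hardware index, and it fetches one IRQ per enabled channel by channel index, which is why the resources are laid out this way in the sketch.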
Signed-off-by: Laurent Pinchart --- drivers/clocksource/sh_cmt.c | 304 +++++++++++++++++++++++++++++++++---------- include/linux/sh_timer.h | 1 + 2 files changed, 237 insertions(+), 68 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index c753efcfe9f5..1efe7d64efca 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -53,7 +53,16 @@ struct sh_cmt_device; * channel registers block. All other versions have a shared start/stop register * located in the global space. * - * Note that CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit + * Channels are indexed from 0 to N-1 in the documentation. The channel index + * infers the start/stop bit position in the control register and the channel + * registers block address. Some CMT instances have a subset of channels + * available, in which case the index in the documentation doesn't match the + * "real" index as implemented in hardware. This is for instance the case with + * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0 + * in the documentation but using start/stop bit 5 and having its registers + * block at 0x60. + * + * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable. */ @@ -85,10 +94,14 @@ struct sh_cmt_info { struct sh_cmt_channel { struct sh_cmt_device *cmt; - unsigned int index; - void __iomem *base; + unsigned int index; /* Index in the documentation */ + unsigned int hwidx; /* Real hardware index */ + + void __iomem *iostart; + void __iomem *ioctrl; + unsigned int timer_bit; unsigned long flags; unsigned long match_value; unsigned long next_match_value; @@ -105,6 +118,7 @@ struct sh_cmt_device { struct platform_device *pdev; const struct sh_cmt_info *info; + bool legacy; void __iomem *mapbase_ch; void __iomem *mapbase; @@ -112,6 +126,9 @@ struct sh_cmt_device { struct sh_cmt_channel *channels; unsigned int num_channels; + + bool has_clockevent; + bool has_clocksource; }; #define SH_CMT16_CMCSR_CMF (1 << 7) @@ -223,41 +240,47 @@ static const struct sh_cmt_info sh_cmt_info[] = { static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch) { - return ch->cmt->info->read_control(ch->cmt->mapbase, 0); + if (ch->iostart) + return ch->cmt->info->read_control(ch->iostart, 0); + else + return ch->cmt->info->read_control(ch->cmt->mapbase, 0); } -static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) +static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, + unsigned long value) { - return ch->cmt->info->read_control(ch->base, CMCSR); + if (ch->iostart) + ch->cmt->info->write_control(ch->iostart, 0, value); + else + ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); } -static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) +static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) { - return ch->cmt->info->read_count(ch->base, CMCNT); + return ch->cmt->info->read_control(ch->ioctrl, CMCSR); } -static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, +static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, unsigned long value) { - ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); } -static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, - unsigned long value) +static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) { - 
ch->cmt->info->write_control(ch->base, CMCSR, value); + return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, unsigned long value) { - ch->cmt->info->write_count(ch->base, CMCNT, value); + ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); } static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, unsigned long value) { - ch->cmt->info->write_count(ch->base, CMCOR, value); + ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); } static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, @@ -286,7 +309,6 @@ static DEFINE_RAW_SPINLOCK(sh_cmt_lock); static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) { - struct sh_timer_config *cfg = ch->cmt->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ @@ -294,9 +316,9 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) value = sh_cmt_read_cmstr(ch); if (start) - value |= 1 << cfg->timer_bit; + value |= 1 << ch->timer_bit; else - value &= ~(1 << cfg->timer_bit); + value &= ~(1 << ch->timer_bit); sh_cmt_write_cmstr(ch, value); raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); @@ -790,27 +812,72 @@ static void sh_cmt_register_clockevent(struct sh_cmt_channel *ch, static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name, bool clockevent, bool clocksource) { - if (clockevent) + if (clockevent) { + ch->cmt->has_clockevent = true; sh_cmt_register_clockevent(ch, name); + } - if (clocksource) + if (clocksource) { + ch->cmt->has_clocksource = true; sh_cmt_register_clocksource(ch, name); + } return 0; } static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, - struct sh_cmt_device *cmt) + unsigned int hwidx, bool clockevent, + bool clocksource, struct sh_cmt_device *cmt) { - struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; int irq; int ret; + /* Skip unused channels. */ + if (!clockevent && !clocksource) + return 0; + ch->cmt = cmt; - ch->base = cmt->mapbase_ch; ch->index = index; + ch->hwidx = hwidx; + + /* + * Compute the address of the channel control register block. For the + * timers with a per-channel start/stop register, compute its address + * as well. + * + * For legacy configuration the address has been mapped explicitly. + */ + if (cmt->legacy) { + ch->ioctrl = cmt->mapbase_ch; + } else { + switch (cmt->info->model) { + case SH_CMT_16BIT: + ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; + break; + case SH_CMT_32BIT: + case SH_CMT_48BIT: + ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; + break; + case SH_CMT_32BIT_FAST: + /* + * The 32-bit "fast" timer has a single channel at hwidx + * 5 but is located at offset 0x40 instead of 0x60 for + * some reason. + */ + ch->ioctrl = cmt->mapbase + 0x40; + break; + case SH_CMT_48BIT_GEN2: + ch->iostart = cmt->mapbase + ch->hwidx * 0x100; + ch->ioctrl = ch->iostart + 0x10; + break; + } + } + + if (cmt->legacy) + irq = platform_get_irq(cmt->pdev, 0); + else + irq = platform_get_irq(cmt->pdev, ch->index); - irq = platform_get_irq(cmt->pdev, 0); if (irq < 0) { dev_err(&cmt->pdev->dev, "ch%u: failed to get irq\n", ch->index); @@ -825,9 +892,15 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, ch->match_value = ch->max_match_value; raw_spin_lock_init(&ch->lock); + if (cmt->legacy) { + ch->timer_bit = ch->hwidx; + } else { + ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 + ? 
0 : ch->hwidx; + } + ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), - cfg->clockevent_rating != 0, - cfg->clocksource_rating != 0); + clockevent, clocksource); if (ret) { dev_err(&cmt->pdev->dev, "ch%u: registration failed\n", ch->index); @@ -847,97 +920,180 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, return 0; } -static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) +static int sh_cmt_map_memory(struct sh_cmt_device *cmt) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - struct resource *res, *res2; - int ret; - ret = -ENXIO; + struct resource *mem; - cmt->pdev = pdev; + mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); + return -ENXIO; + } - if (!cfg) { - dev_err(&cmt->pdev->dev, "missing platform data\n"); - goto err0; + cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); + if (cmt->mapbase == NULL) { + dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); + return -ENXIO; } + return 0; +} + +static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) +{ + struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; + struct resource *res, *res2; + + /* map memory, let mapbase_ch point to our channel */ res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); - goto err0; + return -ENXIO; } - /* optional resource for the shared timer start/stop register */ - res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); - - /* map memory, let mapbase_ch point to our channel */ cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); if (cmt->mapbase_ch == NULL) { dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); - goto err0; + return -ENXIO; } + /* optional resource for the shared timer start/stop register */ + res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); + /* map second resource for CMSTR */ cmt->mapbase = ioremap_nocache(res2 ? res2->start : res->start - cfg->channel_offset, res2 ? resource_size(res2) : 2); if (cmt->mapbase == NULL) { dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); - goto err1; + iounmap(cmt->mapbase_ch); + return -ENXIO; } - /* get hold of clock */ + /* identify the model based on the resources */ + if (resource_size(res) == 6) + cmt->info = &sh_cmt_info[SH_CMT_16BIT]; + else if (res2 && (resource_size(res2) == 4)) + cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; + else + cmt->info = &sh_cmt_info[SH_CMT_32BIT]; + + return 0; +} + +static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) +{ + iounmap(cmt->mapbase); + if (cmt->mapbase_ch) + iounmap(cmt->mapbase_ch); +} + +static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) +{ + struct sh_timer_config *cfg = pdev->dev.platform_data; + const struct platform_device_id *id = pdev->id_entry; + unsigned int hw_channels; + int ret; + + memset(cmt, 0, sizeof(*cmt)); + cmt->pdev = pdev; + + if (!cfg) { + dev_err(&cmt->pdev->dev, "missing platform data\n"); + return -ENXIO; + } + + cmt->info = (const struct sh_cmt_info *)id->driver_data; + cmt->legacy = cmt->info ? false : true; + + /* Get hold of clock. 
*/ cmt->clk = clk_get(&cmt->pdev->dev, "cmt_fck"); if (IS_ERR(cmt->clk)) { dev_err(&cmt->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(cmt->clk); - goto err2; + return PTR_ERR(cmt->clk); } ret = clk_prepare(cmt->clk); if (ret < 0) - goto err3; + goto err_clk_put; - /* identify the model based on the resources */ - if (resource_size(res) == 6) - cmt->info = &sh_cmt_info[SH_CMT_16BIT]; - else if (res2 && (resource_size(res2) == 4)) - cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; + /* + * Map the memory resource(s). We need to support both the legacy + * platform device configuration (with one device per channel) and the + * new version (with multiple channels per device). + */ + if (cmt->legacy) + ret = sh_cmt_map_memory_legacy(cmt); else - cmt->info = &sh_cmt_info[SH_CMT_32BIT]; + ret = sh_cmt_map_memory(cmt); - cmt->channels = kzalloc(sizeof(*cmt->channels), GFP_KERNEL); + if (ret < 0) + goto err_clk_unprepare; + + /* Allocate and setup the channels. */ + if (cmt->legacy) { + cmt->num_channels = 1; + hw_channels = 0; + } else { + cmt->num_channels = hweight8(cfg->channels_mask); + hw_channels = cfg->channels_mask; + } + + cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), + GFP_KERNEL); if (cmt->channels == NULL) { ret = -ENOMEM; - goto err4; + goto err_unmap; } - cmt->num_channels = 1; + if (cmt->legacy) { + ret = sh_cmt_setup_channel(&cmt->channels[0], + cfg->timer_bit, cfg->timer_bit, + cfg->clockevent_rating != 0, + cfg->clocksource_rating != 0, cmt); + if (ret < 0) + goto err_unmap; + } else { + unsigned int mask = hw_channels; + unsigned int i; - ret = sh_cmt_setup_channel(&cmt->channels[0], cfg->timer_bit, cmt); - if (ret < 0) - goto err4; + /* + * Use the first channel as a clock event device and the second + * channel as a clock source. If only one channel is available + * use it for both. 
+ */ + for (i = 0; i < cmt->num_channels; ++i) { + unsigned int hwidx = ffs(mask) - 1; + bool clocksource = i == 1 || cmt->num_channels == 1; + bool clockevent = i == 0; + + ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, + clockevent, clocksource, + cmt); + if (ret < 0) + goto err_unmap; + + mask &= ~(1 << hwidx); + } + } platform_set_drvdata(pdev, cmt); return 0; -err4: + +err_unmap: kfree(cmt->channels); + sh_cmt_unmap_memory(cmt); +err_clk_unprepare: clk_unprepare(cmt->clk); -err3: +err_clk_put: clk_put(cmt->clk); -err2: - iounmap(cmt->mapbase); -err1: - iounmap(cmt->mapbase_ch); -err0: return ret; } static int sh_cmt_probe(struct platform_device *pdev) { struct sh_cmt_device *cmt = platform_get_drvdata(pdev); - struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; if (!is_early_platform_device(pdev)) { @@ -966,7 +1122,7 @@ static int sh_cmt_probe(struct platform_device *pdev) return 0; out: - if (cfg->clockevent_rating || cfg->clocksource_rating) + if (cmt->has_clockevent || cmt->has_clocksource) pm_runtime_irq_safe(&pdev->dev); else pm_runtime_idle(&pdev->dev); @@ -979,12 +1135,24 @@ static int sh_cmt_remove(struct platform_device *pdev) return -EBUSY; /* cannot unregister clockevent and clocksource */ } +static const struct platform_device_id sh_cmt_id_table[] = { + { "sh_cmt", 0 }, + { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, + { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, + { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, + { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, + { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); + static struct platform_driver sh_cmt_device_driver = { .probe = sh_cmt_probe, .remove = sh_cmt_remove, .driver = { .name = "sh_cmt", - } + }, + .id_table = sh_cmt_id_table, }; static int __init sh_cmt_init(void) diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h index 4d9dcd138315..8e1e036d6d45 100644 --- a/include/linux/sh_timer.h +++ b/include/linux/sh_timer.h @@ -7,6 +7,7 @@ struct sh_timer_config { int timer_bit; unsigned long clockevent_rating; unsigned long clocksource_rating; + unsigned int channels_mask; }; #endif /* __SH_TIMER_H__ */ -- cgit v1.2.3 From 63151a449ebaef062ffac5b302206565ff5ef62e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:52 +0200 Subject: blk-mq: allow drivers to hook into I/O completion Split out the bottom half of blk_mq_end_io so that drivers can perform work when they know a request has been completed, but before it has been freed. This also obsoletes blk_mq_end_io_partial as drivers can now pass any value to blk_update_request directly. 
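[Editorial illustration, not part of the patch: a hedged sketch of the split completion path this change enables. The mydrv_* names, the cmd->status field and mydrv_unmap_data() are invented; only blk_mq_rq_to_pdu(), blk_update_request(), blk_rq_bytes() and __blk_mq_end_io() belong to the real interface.]

/*
 * Hypothetical driver completion handler: run driver-private teardown
 * after the request is known to be complete but before blk-mq frees it.
 */
static void mydrv_softirq_done(struct request *rq)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
	int error = cmd->status ? -EIO : 0;

	/* Account for all transferred bytes of the request. */
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();

	/* Driver-private cleanup while rq and its payload are still valid. */
	mydrv_unmap_data(cmd);

	/* Hand the request back to blk-mq for accounting and freeing. */
	__blk_mq_end_io(rq, error);
}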
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 16 ++++++++++------ include/linux/blk-mq.h | 9 ++------- 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index b59a8d027dff..86d66e0e900c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -294,20 +294,24 @@ void blk_mq_clone_flush_request(struct request *flush_rq, hctx->cmd_size); } -bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) +inline void __blk_mq_end_io(struct request *rq, int error) { - if (blk_update_request(rq, error, blk_rq_bytes(rq))) - return true; - blk_account_io_done(rq); if (rq->end_io) rq->end_io(rq, error); else blk_mq_free_request(rq); - return false; } -EXPORT_SYMBOL(blk_mq_end_io_partial); +EXPORT_SYMBOL(__blk_mq_end_io); + +void blk_mq_end_io(struct request *rq, int error) +{ + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + BUG(); + __blk_mq_end_io(rq, error); +} +EXPORT_SYMBOL(blk_mq_end_io); static void __blk_mq_complete_request_remote(void *data) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a4ea0ce83b07..a81b474b794f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -149,13 +149,8 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); -bool blk_mq_end_io_partial(struct request *rq, int error, - unsigned int nr_bytes); -static inline void blk_mq_end_io(struct request *rq, int error) -{ - bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); - BUG_ON(!done); -} +void blk_mq_end_io(struct request *rq, int error); +void __blk_mq_end_io(struct request *rq, int error); void blk_mq_complete_request(struct request *rq); -- cgit v1.2.3 From 1b4a325858f695a9b5041313602d34b36f463724 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:54 +0200 Subject: blk-mq: add async parameter to blk_mq_start_stopped_hw_queues Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ++-- drivers/block/virtio_blk.c | 4 ++-- include/linux/blk-mq.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 963a82109386..da3808823e44 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -700,7 +700,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL(blk_mq_start_hw_queue); -void blk_mq_start_stopped_hw_queues(struct request_queue *q) +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; int i; @@ -711,7 +711,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q) clear_bit(BLK_MQ_S_STOPPED, &hctx->state); preempt_disable(); - blk_mq_run_hw_queue(hctx, true); + blk_mq_run_hw_queue(hctx, async); preempt_enable(); } } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index f909a8821e65..7a51f065edcd 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -151,7 +151,7 @@ static void virtblk_done(struct virtqueue *vq) /* In case queue is stopped waiting for more buffers. 
*/ if (req_done) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) @@ -762,7 +762,7 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); return ret; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a81b474b794f..9ecfab96d8c9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -157,7 +157,7 @@ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); -void blk_mq_start_stopped_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); /* * Driver command data is immediately after the request. So subtract request -- cgit v1.2.3 From 70f4db639c5b2479e08657392cbf3ba3cceea11c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 10:48:08 -0600 Subject: blk-mq: add blk_mq_delay_queue Add a blk-mq equivalent to blk_delay_queue so that the scsi layer can ask to be kicked again after a delay. Signed-off-by: Christoph Hellwig Modified by me to kill the unnecessary preempt disable/enable in the delayed workqueue handler. Signed-off-by: Jens Axboe --- block/blk-core.c | 6 ++++-- block/blk-mq.c | 45 +++++++++++++++++++++++++++++++++++++++------ include/linux/blk-mq.h | 4 +++- 3 files changed, 46 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index ae6227fd07aa..90b6e63b8769 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -251,8 +251,10 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->delayed_work); + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->run_work); + cancel_delayed_work_sync(&hctx->delay_work); + } } else { cancel_delayed_work_sync(&q->delay_work); } diff --git a/block/blk-mq.c b/block/blk-mq.c index da3808823e44..0cf52dddfa6b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -640,7 +640,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) __blk_mq_run_hw_queue(hctx); else if (hctx->queue->nr_hw_queues == 1) - kblockd_schedule_delayed_work(&hctx->delayed_work, 0); + kblockd_schedule_delayed_work(&hctx->run_work, 0); else { unsigned int cpu; @@ -651,7 +651,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) * just queue on the first CPU. 
*/ cpu = cpumask_first(hctx->cpumask); - kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0); + kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); } } @@ -675,7 +675,8 @@ EXPORT_SYMBOL(blk_mq_run_queues); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { - cancel_delayed_work(&hctx->delayed_work); + cancel_delayed_work(&hctx->run_work); + cancel_delayed_work(&hctx->delay_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); @@ -717,15 +718,46 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); -static void blk_mq_work_fn(struct work_struct *work) +static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; - hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); __blk_mq_run_hw_queue(hctx); } +static void blk_mq_delay_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); + + if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) + __blk_mq_run_hw_queue(hctx); +} + +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + unsigned long tmo = msecs_to_jiffies(msecs); + + if (hctx->queue->nr_hw_queues == 1) + kblockd_schedule_delayed_work(&hctx->delay_work, tmo); + else { + unsigned int cpu; + + /* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement + * than the first CPU. Or we could round-robin here. For now, + * just queue on the first CPU. + */ + cpu = cpumask_first(hctx->cpumask); + kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); + } +} +EXPORT_SYMBOL(blk_mq_delay_queue); + static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { @@ -1179,7 +1211,8 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (node == NUMA_NO_NODE) node = hctx->numa_node = set->numa_node; - INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9ecfab96d8c9..ae868e77bc2f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -18,7 +18,8 @@ struct blk_mq_hw_ctx { } ____cacheline_aligned_in_smp; unsigned long state; /* BLK_MQ_S_* flags */ - struct delayed_work delayed_work; + struct delayed_work run_work; + struct delayed_work delay_work; cpumask_var_t cpumask; unsigned long flags; /* BLK_MQ_F_* flags */ @@ -158,6 +159,7 @@ void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); /* * Driver command data is immediately after the request. So subtract request -- cgit v1.2.3 From 2f268556567ebeb3538f99b9bdad177581439dcb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:56 +0200 Subject: blk-mq: add blk_mq_start_hw_queues Add a helper to unconditionally kick contexts of a queue. This will be needed by the SCSI layer to provide fair queueing between multiple devices on a single host. 
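[Editorial illustration, not part of the patch: a hedged sketch of the intended stop/start pattern. The mydrv structure and helpers are invented; only blk_mq_stop_hw_queue(), blk_mq_start_hw_queues() and the BLK_MQ_RQ_QUEUE_* return codes are part of the real interface.]

#include <linux/blk-mq.h>

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct mydrv *dev = rq->q->queuedata;	/* hypothetical per-device data */

	if (!mydrv_has_resources(dev)) {
		/* Quiesce this context until resources are returned. */
		blk_mq_stop_hw_queue(hctx);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return mydrv_issue(dev, rq) ? BLK_MQ_RQ_QUEUE_ERROR : BLK_MQ_RQ_QUEUE_OK;
}

/* Called by the driver once resources become available again. */
static void mydrv_resources_returned(struct mydrv *dev)
{
	/* Kick every hardware context, whether or not it was stopped. */
	blk_mq_start_hw_queues(dev->queue);
}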
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 11 +++++++++++ include/linux/blk-mq.h | 1 + 2 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 0cf52dddfa6b..543bbc08a261 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -701,6 +701,17 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL(blk_mq_start_hw_queue); +void blk_mq_start_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_start_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_start_hw_queues); + + void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ae868e77bc2f..391377e53367 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -158,6 +158,7 @@ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); +void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -- cgit v1.2.3 From ed0791b2f83cec4e77d88c4e9baabcebf9254a78 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:57 +0200 Subject: blk-mq: add blk_mq_requeue_request This allows to requeue a request that has been accepted by ->queue_rq earlier. This is needed by the SCSI layer in various error conditions. The existing internal blk_mq_requeue_request is renamed to __blk_mq_requeue_request as it is a lower level building block for this funtionality. 
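[Editorial illustration, not part of the patch: a hedged sketch of a completion path that requeues a bounced command. The mydrv_cmd structure and MYDRV_STATUS_BUSY are invented; blk_mq_rq_to_pdu(), blk_mq_requeue_request() and blk_mq_end_io() are the real calls.]

#include <linux/blk-mq.h>

/*
 * Hypothetical completion handler: if the device reports a transient
 * "busy" condition for a command it had already accepted in ->queue_rq,
 * push the request back to blk-mq instead of failing it.
 */
static void mydrv_complete_rq(struct request *rq)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (cmd->status == MYDRV_STATUS_BUSY) {
		/* Reverses ->queue_rq and queues the request for redispatch. */
		blk_mq_requeue_request(rq);
		return;
	}

	blk_mq_end_io(rq, cmd->status ? -EIO : 0);
}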
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 18 ++++++++++++++++-- include/linux/blk-mq.h | 2 ++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 543bbc08a261..ee225cc312b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -400,7 +400,7 @@ static void blk_mq_start_request(struct request *rq, bool last) rq->cmd_flags |= REQ_END; } -static void blk_mq_requeue_request(struct request *rq) +static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; @@ -413,6 +413,20 @@ static void blk_mq_requeue_request(struct request *rq) rq->nr_phys_segments--; } +void blk_mq_requeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + __blk_mq_requeue_request(rq); + blk_clear_rq_complete(rq); + + trace_block_rq_requeue(q, rq); + + BUG_ON(blk_queued_rq(rq)); + blk_mq_insert_request(rq, true, true, false); +} +EXPORT_SYMBOL(blk_mq_requeue_request); + struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { return tags->rqs[tag]; @@ -602,7 +616,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) * time */ list_add(&rq->queuelist, &rq_list); - blk_mq_requeue_request(rq); + __blk_mq_requeue_request(rq); break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 391377e53367..ab469d525894 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -153,6 +153,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); +void blk_mq_requeue_request(struct request *rq); + void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3 From f88a164b72bd51fe4c89e06ac9939f2afe39c7ed Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:58 +0200 Subject: blk-mq: rename mq_flush_work struct request member We will use this work_struct to requeue scsi commands from the completion handler as well, so give it a more generic name. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-flush.c | 6 +++--- include/linux/blkdev.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/block/blk-flush.c b/block/blk-flush.c index c41fc19f75d1..ec7a224d6733 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -134,7 +134,7 @@ static void mq_flush_run(struct work_struct *work) { struct request *rq; - rq = container_of(work, struct request, mq_flush_work); + rq = container_of(work, struct request, requeue_work); memset(&rq->csd, 0, sizeof(rq->csd)); blk_mq_insert_request(rq, false, true, false); @@ -143,8 +143,8 @@ static void mq_flush_run(struct work_struct *work) static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { - INIT_WORK(&rq->mq_flush_work, mq_flush_run); - kblockd_schedule_work(&rq->mq_flush_work); + INIT_WORK(&rq->requeue_work, mq_flush_run); + kblockd_schedule_work(&rq->requeue_work); return false; } else { if (add_front) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 95bb551273ab..71288083a46f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -98,7 +98,7 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct mq_flush_work; + struct work_struct requeue_work; unsigned long fifo_time; }; -- cgit v1.2.3 From 12120077b2612a243d158605640cd39266906667 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:59 +0200 Subject: block: export blk_finish_request This allows to mirror the blk-mq code flow for more a more readable I/O completion handler in SCSI. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 3 ++- include/linux/blkdev.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 90b6e63b8769..c4269701cb4f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2497,7 +2497,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); /* * queue lock must be held */ -static void blk_finish_request(struct request *req, int error) +void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -2523,6 +2523,7 @@ static void blk_finish_request(struct request *req, int error) __blk_put_request(req->q, req); } } +EXPORT_SYMBOL(blk_finish_request); /** * blk_end_bidi_request - Complete a bidi request diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 71288083a46f..20b26d4e53a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -936,6 +936,7 @@ extern struct request *blk_fetch_request(struct request_queue *q); */ extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); +extern void blk_finish_request(struct request *rq, int error); extern bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes); extern void blk_end_request_all(struct request *rq, int error); -- cgit v1.2.3 From 49fd524f95cb4cc699d435e0ebb08b1c6220da6d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 16 Apr 2014 10:57:18 -0600 Subject: bsg: update check for rq based driver for blk-mq bsg currently checks ->request_fn to check whether a queue can handle struct request. But with blk-mq, we don't have a request_fn yet are request based. Add a queue_is_rq_based() helper and use that in bsg, I'm guessing this is not the last place we need to update for this. Besides, it better explains what is being checked. 
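[Editorial illustration, not part of the patch: a hedged sketch of another possible caller of the new helper. The mytransport_* names are invented; only queue_is_rq_based() comes from this change.]

#include <linux/blkdev.h>

/*
 * A transport that needs to issue struct request commands can test the
 * queue type with the helper instead of peeking at ->request_fn.
 */
static int mytransport_attach(struct request_queue *q)
{
	/* True for both legacy request_fn drivers and blk-mq drivers. */
	if (!queue_is_rq_based(q))
		return -EINVAL;		/* bio-based: cannot take struct request */

	return mytransport_register(q);	/* hypothetical registration step */
}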
Signed-off-by: Jens Axboe --- block/bsg.c | 2 +- include/linux/blkdev.h | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/bsg.c b/block/bsg.c index 420a5a9f1b23..e5214c148096 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -1008,7 +1008,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent, /* * we need a proper transport to send commands, not a stacked device */ - if (!q->request_fn) + if (!queue_is_rq_based(q)) return 0; bcd = &q->bsg_dev; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 20b26d4e53a2..74ee55fefcf0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -612,6 +612,15 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0) +/* + * Driver can handle struct request, if it either has an old style + * request_fn defined, or is blk-mq based. + */ +static inline bool queue_is_rq_based(struct request_queue *q) +{ + return q->request_fn || q->mq_ops; +} + static inline unsigned int blk_queue_cluster(struct request_queue *q) { return q->limits.cluster; -- cgit v1.2.3 From a5d92ad32dad94fd8f3f61778561d532bb3a2f77 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 17 Mar 2014 10:57:00 +0000 Subject: efivars: Stop passing a struct argument to efivar_validate() In preparation for compat support, we can't assume that user variable object is represented by a 'struct efi_variable'. Convert the validation functions to take the variable name as an argument, which is the only piece of the struct that was ever used anyway. Cc: Mike Waychison Signed-off-by: Matt Fleming --- drivers/firmware/efi/efivars.c | 6 ++++-- drivers/firmware/efi/vars.c | 30 +++++++++++++++--------------- include/linux/efi.h | 6 ++++-- 3 files changed, 23 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 2c21cccc2572..5ee2cfb96698 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -231,7 +231,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) } if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(new_var, data, size) == false) { + efivar_validate(name, data, size) == false) { printk(KERN_ERR "efivars: Malformed variable content\n"); return -EINVAL; } @@ -339,6 +339,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, { struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *new_entry; + efi_char16_t *name; unsigned long size; u32 attributes; u8 *data; @@ -351,11 +352,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, return -EINVAL; attributes = new_var->Attributes; + name = new_var->VariableName; size = new_var->DataSize; data = new_var->Data; if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(new_var, data, size) == false) { + efivar_validate(name, data, size) == false) { printk(KERN_ERR "efivars: Malformed variable content\n"); return -EINVAL; } diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index b22659cccca4..f0a43646a2f3 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); EXPORT_SYMBOL_GPL(efivar_work); static bool -validate_device_path(struct efi_variable *var, int match, u8 *buffer, +validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { struct 
efi_generic_dev_path *node; @@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, } static bool -validate_boot_order(struct efi_variable *var, int match, u8 *buffer, +validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { /* An array of 16-bit integers */ @@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, } static bool -validate_load_option(struct efi_variable *var, int match, u8 *buffer, +validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { u16 filepathlength; int i, desclength = 0, namelen; - namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); + namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); /* Either "Boot" or "Driver" followed by four digits of hex */ for (i = match; i < match+4; i++) { - if (var->VariableName[i] > 127 || - hex_to_bin(var->VariableName[i] & 0xff) < 0) + if (var_name[i] > 127 || + hex_to_bin(var_name[i] & 0xff) < 0) return true; } @@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, /* * And, finally, check the filepath */ - return validate_device_path(var, match, buffer + desclength + 6, + return validate_device_path(var_name, match, buffer + desclength + 6, filepathlength); } static bool -validate_uint16(struct efi_variable *var, int match, u8 *buffer, +validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { /* A single 16-bit integer */ @@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, } static bool -validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, +validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, unsigned long len) { int i; @@ -166,7 +166,7 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, struct variable_validate { char *name; - bool (*validate)(struct efi_variable *var, int match, u8 *data, + bool (*validate)(efi_char16_t *var_name, int match, u8 *data, unsigned long len); }; @@ -189,10 +189,10 @@ static const struct variable_validate variable_validate[] = { }; bool -efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) +efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) { int i; - u16 *unicode_name = var->VariableName; + u16 *unicode_name = var_name; for (i = 0; variable_validate[i].validate != NULL; i++) { const char *name = variable_validate[i].name; @@ -208,7 +208,7 @@ efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) /* Wildcard in the matching name means we've matched */ if (c == '*') - return variable_validate[i].validate(var, + return variable_validate[i].validate(var_name, match, data, len); /* Case sensitive match */ @@ -217,7 +217,7 @@ efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) /* Reached the end of the string while matching */ if (!c) - return variable_validate[i].validate(var, + return variable_validate[i].validate(var_name, match, data, len); } } @@ -805,7 +805,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, *set = false; - if (efivar_validate(&entry->var, data, *size) == false) + if (efivar_validate(name, data, *size) == false) return -EINVAL; /* diff --git a/include/linux/efi.h b/include/linux/efi.h index 82d0abb2b19f..6a4d8e27d1d7 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1039,8 +1039,10 @@ struct efivars { * and we use a page for reading/writing. 
*/ +#define EFI_VAR_NAME_LEN 1024 + struct efi_variable { - efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; + efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; efi_guid_t VendorGuid; unsigned long DataSize; __u8 Data[1024]; @@ -1122,7 +1124,7 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, struct list_head *head, bool remove); -bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); +bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len); extern struct work_struct efivar_work; void efivar_run_worker(void); -- cgit v1.2.3 From 68c3b4d1676d870f0453c31d5a52e7e65c7448ae Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 31 Mar 2014 21:50:44 +0300 Subject: KVM: VMX: speed up wildcard MMIO EVENTFD With KVM, MMIO is much slower than PIO, due to the need to do page walk and emulation. But with EPT, it does not have to be: we know the address from the VMCS so if the address is unique, we can look up the eventfd directly, bypassing emulation. Unfortunately, this only works if userspace does not need to match on access length and data. The implementation adds a separate FAST_MMIO bus internally. This serves two purposes: - minimize overhead for old userspace that does not use eventfd with lengtth = 0 - minimize disruption in other code (since we don't know the length, devices on the MMIO bus only get a valid address in write, this way we don't need to touch all devices to teach them to handle an invalid length) At the moment, this optimization only has effect for EPT on x86. It will be possible to speed up MMIO for NPT and MMU using the same idea in the future. With this patch applied, on VMX MMIO EVENTFD is essentially as fast as PIO. I was unable to detect any measureable slowdown to non-eventfd MMIO. Making MMIO faster is important for the upcoming virtio 1.0 which includes an MMIO signalling capability. The idea was suggested by Peter Anvin. Lots of thanks to Gleb for pre-review and suggestions. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 4 ++++ include/linux/kvm_host.h | 1 + include/uapi/linux/kvm.h | 1 + virt/kvm/eventfd.c | 16 ++++++++++++++++ virt/kvm/kvm_main.c | 1 + 5 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1f68c5831924..eb3f2b1b764c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5528,6 +5528,10 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); + if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { + skip_emulated_instruction(vcpu); + return 1; + } ret = handle_mmio_page_fault_common(vcpu, gpa, true); if (likely(ret == RET_MMIO_PF_EMULATE)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7d21cf9f4380..6c3c2eb96d06 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -163,6 +163,7 @@ enum kvm_bus { KVM_MMIO_BUS, KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, + KVM_FAST_MMIO_BUS, KVM_NR_BUSES }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 39098a61f41c..d8a6ce4c2a83 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -515,6 +515,7 @@ enum { kvm_ioeventfd_flag_nr_pio, kvm_ioeventfd_flag_nr_deassign, kvm_ioeventfd_flag_nr_virtio_ccw_notify, + kvm_ioeventfd_flag_nr_fast_mmio, kvm_ioeventfd_flag_nr_max, }; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 2721996bb9c2..912ec5a95e2c 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -770,6 +770,16 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) if (ret < 0) goto unlock_fail; + /* When length is ignored, MMIO is also put on a separate bus, for + * faster lookups. + */ + if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) { + ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS, + p->addr, 0, &p->dev); + if (ret < 0) + goto register_fail; + } + kvm->buses[bus_idx]->ioeventfd_count++; list_add_tail(&p->list, &kvm->ioeventfds); @@ -777,6 +787,8 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) return 0; +register_fail: + kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); unlock_fail: mutex_unlock(&kvm->slots_lock); @@ -816,6 +828,10 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); + if (!p->length) { + kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS, + &p->dev); + } kvm->buses[bus_idx]->ioeventfd_count--; ioeventfd_release(p); ret = 0; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 56baae8c2f56..96456ac888ba 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2922,6 +2922,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, return -EOPNOTSUPP; } +EXPORT_SYMBOL_GPL(kvm_io_bus_write); /* kvm_io_bus_read - called under kvm->slots_lock */ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, -- cgit v1.2.3 From febdbfe8a91ce0d11939d4940b592eb0dba8d663 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 6 Feb 2014 18:16:07 +0100 Subject: arch: Prepare for smp_mb__{before,after}_atomic() Since the smp_mb__{before,after}*() ops are fundamentally dependent on how an arch can implement atomics it doesn't make sense to have 3 variants of them. They must all be the same. Furthermore, the 3 variants suggest they're only valid for those 3 atomic ops, while we have many more where they could be applied. 
So move away from smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() and reduce the interface to just the two: smp_mb__{before,after}_atomic(). This patch prepares the way by introducing default implementations in asm-generic/barrier.h that default to a full barrier and providing __deprecated inlines for the previous 6 barriers if they're not provided by the arch. This should allow for a mostly painless transition (lots of deprecated warns in the interim). Signed-off-by: Peter Zijlstra Acked-by: Paul E. McKenney Link: http://lkml.kernel.org/n/tip-wr59327qdyi9mbzn6x937s4e@git.kernel.org Cc: Arnd Bergmann Cc: "Chen, Gong" Cc: John Sullivan Cc: Linus Torvalds Cc: Mauro Carvalho Chehab Cc: Srinivas Pandruvada Cc: "Theodore Ts'o" Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/asm-generic/atomic.h | 7 +------ include/asm-generic/barrier.h | 8 ++++++++ include/asm-generic/bitops.h | 9 +-------- include/linux/atomic.h | 36 ++++++++++++++++++++++++++++++++++++ include/linux/bitops.h | 20 ++++++++++++++++++++ kernel/sched/core.c | 16 ++++++++++++++++ 6 files changed, 82 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 33bd2de3bc1e..9c79e7603459 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -16,6 +16,7 @@ #define __ASM_GENERIC_ATOMIC_H #include +#include #ifdef CONFIG_SMP /* Force people to define core atomics */ @@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) } #endif -/* Assume that atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 6f692f8ac664..1402fa855388 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -62,6 +62,14 @@ #define set_mb(var, value) do { (var) = (value); mb(); } while (0) #endif +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() smp_mb() +#endif + #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index 280ca7a96f75..dcdcacf2fd2b 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h @@ -11,14 +11,7 @@ #include #include - -/* - * clear_bit may not imply a memory barrier - */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif +#include #include #include diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 5b08a8540ecf..fef3a809e7cf 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -3,6 +3,42 @@ #define _LINUX_ATOMIC_H #include +/* + * Provide __deprecated wrappers for the new interface, avoid flag day changes. + * We need the ugly external functions to break header recursion hell. 
+ */ +#ifndef smp_mb__before_atomic_inc +static inline void __deprecated smp_mb__before_atomic_inc(void) +{ + extern void __smp_mb__before_atomic(void); + __smp_mb__before_atomic(); +} +#endif + +#ifndef smp_mb__after_atomic_inc +static inline void __deprecated smp_mb__after_atomic_inc(void) +{ + extern void __smp_mb__after_atomic(void); + __smp_mb__after_atomic(); +} +#endif + +#ifndef smp_mb__before_atomic_dec +static inline void __deprecated smp_mb__before_atomic_dec(void) +{ + extern void __smp_mb__before_atomic(void); + __smp_mb__before_atomic(); +} +#endif + +#ifndef smp_mb__after_atomic_dec +static inline void __deprecated smp_mb__after_atomic_dec(void) +{ + extern void __smp_mb__after_atomic(void); + __smp_mb__after_atomic(); +} +#endif + /** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a0..cbc5833fb221 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -32,6 +32,26 @@ extern unsigned long __sw_hweight64(__u64 w); */ #include +/* + * Provide __deprecated wrappers for the new interface, avoid flag day changes. + * We need the ugly external functions to break header recursion hell. + */ +#ifndef smp_mb__before_clear_bit +static inline void __deprecated smp_mb__before_clear_bit(void) +{ + extern void __smp_mb__before_atomic(void); + __smp_mb__before_atomic(); +} +#endif + +#ifndef smp_mb__after_clear_bit +static inline void __deprecated smp_mb__after_clear_bit(void) +{ + extern void __smp_mb__after_atomic(void); + __smp_mb__after_atomic(); +} +#endif + #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 268a45ea238c..8a70ec091760 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -90,6 +90,22 @@ #define CREATE_TRACE_POINTS #include +#ifdef smp_mb__before_atomic +void __smp_mb__before_atomic(void) +{ + smp_mb__before_atomic(); +} +EXPORT_SYMBOL(__smp_mb__before_atomic); +#endif + +#ifdef smp_mb__after_atomic +void __smp_mb__after_atomic(void) +{ + smp_mb__after_atomic(); +} +EXPORT_SYMBOL(__smp_mb__after_atomic); +#endif + void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) { unsigned long delta; -- cgit v1.2.3 From 27e4f9d0012a9bb7011aade862f08679d2921ab0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 9 Apr 2014 12:50:34 +0200 Subject: sched/wait: Explain the shadowing and type inconsistencies Stick in a comment before someone else tries to fix the sparse warning this generates. Requested-by: Andrew Morton Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-o2ro6f3vkxklni0bc8f7m68s@git.kernel.org Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/wait.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/wait.h b/include/linux/wait.h index 559044c79232..2b563a15a77d 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -191,11 +191,23 @@ wait_queue_head_t *bit_waitqueue(void *, int); (!__builtin_constant_p(state) || \ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ +/* + * The below macro ___wait_event() has an explicit shadow of the __ret + * variable when used from the wait_event_*() macros. + * + * This is so that both can use the ___wait_cond_timeout() construct + * to wrap the condition. 
+ * + * The type inconsistency of the wait_event_*() __ret variable is also + * on purpose; we use long where we can return timeout values and int + * otherwise. + */ + #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ wait_queue_t __wait; \ - long __ret = ret; \ + long __ret = ret; /* explicit shadow */ \ \ INIT_LIST_HEAD(&__wait.task_list); \ if (exclusive) \ -- cgit v1.2.3 From 08f8aeb55d7727d644dbbbbfb798fe937d47751d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Apr 2014 14:27:25 +0200 Subject: sched: Remove set_need_resched() The last user is gone now, so we can safely remove this function. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/thread_info.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index fddbe2023a5d..cb0cec94fda3 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -104,20 +104,6 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) -static inline __deprecated void set_need_resched(void) -{ - /* - * Use of this function in deprecated. - * - * As of this writing there are only a few users in the DRM tree left - * all of which are wrong and can be removed without causing too much - * grief. - * - * The DRM people are aware and are working on removing the last few - * instances. - */ -} - #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK -- cgit v1.2.3 From c464c76eec4be587604ca082e8cded7e6b89f3bf Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 18 Mar 2014 16:56:41 +0800 Subject: perf: Allow building PMU drivers as modules This patch adds support for building a PMU driver as a module. It exports the functions perf_pmu_{register,unregister}() and adds reference tracking for the PMU driver module. When the PMU driver is built as a module, each active event of the PMU holds a reference to the driver module.
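For illustration only, a minimal sketch of how a modular PMU driver might use the interface described above; this is not part of the patch, the "demo" names and stub callbacks are hypothetical, and only perf_pmu_register()/perf_pmu_unregister() and the new .module field come from the patch itself.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static int demo_event_init(struct perf_event *event)
{
	/* Stub: a real driver validates event->attr here and returns 0
	 * for events it accepts; -ENOENT means "not my event". */
	return -ENOENT;
}

static int demo_add(struct perf_event *event, int flags) { return 0; }
static void demo_del(struct perf_event *event, int flags) { }
static void demo_start(struct perf_event *event, int flags) { }
static void demo_stop(struct perf_event *event, int flags) { }
static void demo_read(struct perf_event *event) { }

static struct pmu demo_pmu = {
	.module		= THIS_MODULE,	/* lets perf_init_event() pin this module per event */
	.event_init	= demo_event_init,
	.add		= demo_add,
	.del		= demo_del,
	.start		= demo_start,
	.stop		= demo_stop,
	.read		= demo_read,
};

static int __init demo_pmu_init(void)
{
	/* type -1 asks the core to assign a dynamic PMU type id */
	return perf_pmu_register(&demo_pmu, "demo_pmu", -1);
}

static void __exit demo_pmu_exit(void)
{
	perf_pmu_unregister(&demo_pmu);
}

module_init(demo_pmu_init);
module_exit(demo_pmu_exit);
MODULE_LICENSE("GPL");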
Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1395133004-23205-1-git-send-email-zheng.z.yan@intel.com Cc: eranian@google.com Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 + kernel/events/core.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3356abcfff18..af6dcf1d9e47 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -172,6 +172,7 @@ struct perf_event; struct pmu { struct list_head entry; + struct module *module; struct device *dev; const struct attribute_group **attr_groups; const char *name; diff --git a/kernel/events/core.c b/kernel/events/core.c index f83a71a3e46d..5129b1201050 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "internal.h" @@ -3229,6 +3230,9 @@ static void __free_event(struct perf_event *event) if (event->ctx) put_ctx(event->ctx); + if (event->pmu) + module_put(event->pmu->module); + call_rcu(&event->rcu_head, free_event_rcu); } static void free_event(struct perf_event *event) @@ -6551,6 +6555,7 @@ free_pdc: free_percpu(pmu->pmu_disable_count); goto unlock; } +EXPORT_SYMBOL_GPL(perf_pmu_register); void perf_pmu_unregister(struct pmu *pmu) { @@ -6572,6 +6577,7 @@ void perf_pmu_unregister(struct pmu *pmu) put_device(pmu->dev); free_pmu_context(pmu); } +EXPORT_SYMBOL_GPL(perf_pmu_unregister); struct pmu *perf_init_event(struct perf_event *event) { @@ -6585,6 +6591,10 @@ struct pmu *perf_init_event(struct perf_event *event) pmu = idr_find(&pmu_idr, event->attr.type); rcu_read_unlock(); if (pmu) { + if (!try_module_get(pmu->module)) { + pmu = ERR_PTR(-ENODEV); + goto unlock; + } event->pmu = pmu; ret = pmu->event_init(event); if (ret) @@ -6593,6 +6603,10 @@ struct pmu *perf_init_event(struct perf_event *event) } list_for_each_entry_rcu(pmu, &pmus, entry) { + if (!try_module_get(pmu->module)) { + pmu = ERR_PTR(-ENODEV); + goto unlock; + } event->pmu = pmu; ret = pmu->event_init(event); if (!ret) @@ -6771,6 +6785,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, err_pmu: if (event->destroy) event->destroy(event); + module_put(pmu->module); err_ns: if (event->ns) put_pid_ns(event->ns); -- cgit v1.2.3 From 4e857c58efeb99393cba5a5d0d8ec7117183137c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 17 Mar 2014 18:06:10 +0100 Subject: arch: Mass conversion of smp_mb__*() Mostly scripted conversion of the smp_mb__* barriers. Signed-off-by: Peter Zijlstra Acked-by: Paul E. 
McKenney Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org Cc: Linus Torvalds Cc: linux-arch@vger.kernel.org Signed-off-by: Ingo Molnar --- block/blk-iopoll.c | 4 +- crypto/chainiv.c | 2 +- drivers/base/power/domain.c | 2 +- drivers/block/mtip32xx/mtip32xx.c | 4 +- drivers/cpuidle/coupled.c | 2 +- drivers/firewire/ohci.c | 2 +- drivers/gpu/drm/drm_irq.c | 10 ++-- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/md/bcache/bcache.h | 2 +- drivers/md/bcache/closure.h | 2 +- drivers/md/dm-bufio.c | 8 ++-- drivers/md/dm-snap.c | 4 +- drivers/md/dm.c | 2 +- drivers/md/raid5.c | 2 +- drivers/media/usb/dvb-usb-v2/dvb_usb_core.c | 6 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18 ++++---- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 26 +++++------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 8 ++-- drivers/net/ethernet/broadcom/cnic.c | 8 ++-- drivers/net/ethernet/brocade/bna/bnad.c | 6 +-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/sge.c | 6 +-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- drivers/net/ethernet/freescale/gianfar.c | 8 ++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +-- drivers/net/wireless/ti/wlcore/main.c | 2 +- drivers/pci/xen-pcifront.c | 4 +- drivers/scsi/isci/remote_device.c | 2 +- drivers/target/loopback/tcm_loop.c | 4 +- drivers/target/target_core_alua.c | 26 +++++------ drivers/target/target_core_device.c | 6 +-- drivers/target/target_core_iblock.c | 2 +- drivers/target/target_core_pr.c | 56 +++++++++++------------ drivers/target/target_core_transport.c | 16 +++---- drivers/target/target_core_ua.c | 10 ++-- drivers/tty/n_tty.c | 2 +- drivers/tty/serial/mxs-auart.c | 4 +- drivers/usb/gadget/tcm_usb_gadget.c | 4 +- drivers/usb/serial/usb_wwan.c | 2 +- drivers/vhost/scsi.c | 2 +- drivers/w1/w1_family.c | 4 +- drivers/xen/xen-pciback/pciback_ops.c | 4 +- fs/btrfs/btrfs_inode.h | 2 +- fs/btrfs/extent_io.c | 2 +- fs/btrfs/inode.c | 6 +-- fs/btrfs/ioctl.c | 2 +- fs/buffer.c | 2 +- fs/ext4/resize.c | 2 +- fs/gfs2/glock.c | 8 ++-- fs/gfs2/glops.c | 2 +- fs/gfs2/lock_dlm.c | 4 +- fs/gfs2/recovery.c | 2 +- fs/gfs2/sys.c | 4 +- fs/jbd2/commit.c | 6 +-- fs/nfs/dir.c | 12 ++--- fs/nfs/inode.c | 2 +- fs/nfs/nfs4filelayoutdev.c | 4 +- fs/nfs/nfs4state.c | 4 +- fs/nfs/pagelist.c | 6 +-- fs/nfs/pnfs.c | 2 +- fs/nfs/pnfs.h | 2 +- fs/nfs/write.c | 4 +- fs/ubifs/lpt_commit.c | 4 +- fs/ubifs/tnc_commit.c | 4 +- include/asm-generic/bitops/atomic.h | 2 +- include/asm-generic/bitops/lock.h | 2 +- include/linux/buffer_head.h | 2 +- include/linux/genhd.h | 2 +- include/linux/interrupt.h | 8 ++-- include/linux/netdevice.h | 2 +- include/linux/sched.h | 6 +-- include/linux/sunrpc/sched.h | 8 ++-- include/linux/sunrpc/xprt.h | 8 ++-- include/linux/tracehook.h | 2 +- include/net/ip_vs.h | 4 +- kernel/debug/debug_core.c | 4 +- kernel/futex.c | 4 +- kernel/kmod.c | 2 +- kernel/rcu/tree.c | 22 ++++----- kernel/rcu/tree_plugin.h | 8 ++-- kernel/sched/cpupri.c | 6 +-- kernel/sched/wait.c | 2 +- mm/backing-dev.c | 2 +- mm/filemap.c | 4 +- net/atm/pppoatm.c | 2 +- net/bluetooth/hci_event.c | 4 +- net/core/dev.c | 8 ++-- net/core/link_watch.c | 2 +- net/ipv4/inetpeer.c | 2 +- net/ipv4/tcp_output.c | 4 +- net/netfilter/nf_conntrack_core.c | 2 +- net/rds/ib_recv.c | 4 +- net/rds/iw_recv.c | 4 +- 
net/rds/send.c | 6 +-- net/rds/tcp_send.c | 2 +- net/sunrpc/auth.c | 2 +- net/sunrpc/auth_gss/auth_gss.c | 2 +- net/sunrpc/backchannel_rqst.c | 4 +- net/sunrpc/xprt.c | 4 +- net/sunrpc/xprtsock.c | 16 +++---- net/unix/af_unix.c | 2 +- sound/pci/bt87x.c | 4 +- 106 files changed, 284 insertions(+), 288 deletions(-) (limited to 'include/linux') diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index c11d24e379e2..f8c6a11b13f0 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -49,7 +49,7 @@ EXPORT_SYMBOL(blk_iopoll_sched); void __blk_iopoll_complete(struct blk_iopoll *iop) { list_del(&iop->list); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); } EXPORT_SYMBOL(__blk_iopoll_complete); @@ -161,7 +161,7 @@ EXPORT_SYMBOL(blk_iopoll_disable); void blk_iopoll_enable(struct blk_iopoll *iop) { BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state)); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); } EXPORT_SYMBOL(blk_iopoll_enable); diff --git a/crypto/chainiv.c b/crypto/chainiv.c index 834d8dd3d4fc..9c294c8f9a07 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c @@ -126,7 +126,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) int err = ctx->err; if (!ctx->queue.qlen) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(CHAINIV_STATE_INUSE, &ctx->state); if (!ctx->queue.qlen || diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ae098a261fcd..eee55c1e5fde 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -105,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) { atomic_inc(&genpd->sd_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static void genpd_acquire_lock(struct generic_pm_domain *genpd) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 59c5abe32f06..4fd8d6c1c3d2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -224,9 +224,9 @@ static int get_slot(struct mtip_port *port) */ static inline void release_slot(struct mtip_port *port, int tag) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(tag, port->allocated); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /* diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index cb6654bfad77..73fe2f8d7f96 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -159,7 +159,7 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) { int n = dev->coupled->online_count; - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(a); while (atomic_read(a) < n) diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8db663219560..995dd42a2627 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -3498,7 +3498,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) } clear_bit_unlock(0, &ctx->flushing_completions); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } tasklet_enable(&ctx->context.tasklet); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index c2676b5908d9..ec5c3f4cdd01 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -156,7 +156,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) */ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 
atomic_inc(&dev->vblank[crtc].count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } /* Invalidate all timestamps while vblank irq's are off. */ @@ -864,9 +864,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) vblanktimestamp(dev, crtc, tslot) = t_vblank; } - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(diff, &dev->vblank[crtc].count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } /** @@ -1330,9 +1330,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) /* Increment cooked vblank count. This also atomically commits * the timestamp computed above. */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&dev->vblank[crtc].count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } else { DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", crtc, (int) diff_ns); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7753249b3a95..5409bfafff63 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2147,7 +2147,7 @@ static void i915_error_work_func(struct work_struct *work) * updates before * the counter increment. */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&dev_priv->gpu_error.reset_counter); kobject_uevent_env(&dev->primary->kdev->kobj, diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 82c9c5d35251..d2ebcf323094 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -828,7 +828,7 @@ static inline bool cached_dev_get(struct cached_dev *dc) return false; /* Paired with the mb in cached_dev_attach */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return true; } diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 7ef7461912be..a08e3eeac3c5 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -243,7 +243,7 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn, cl->fn = fn; cl->wq = wq; /* between atomic_dec() in closure_put() */ - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); } static inline void closure_queue(struct closure *cl) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 66c5d130c8c2..4e84095833db 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -607,9 +607,9 @@ static void write_endio(struct bio *bio, int error) BUG_ON(!test_bit(B_WRITING, &b->state)); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(B_WRITING, &b->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&b->state, B_WRITING); } @@ -997,9 +997,9 @@ static void read_endio(struct bio *bio, int error) BUG_ON(!test_bit(B_READING, &b->state)); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(B_READING, &b->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&b->state, B_READING); } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ebddef5237e4..8e0caed0bf74 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -642,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe) struct dm_snapshot *s = pe->snap; mempool_free(pe, s->pending_pool); - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&s->pending_exceptions_count); } @@ -783,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s) static void merge_shutdown(struct dm_snapshot *s) { clear_bit_unlock(RUNNING_MERGE, &s->state_bits); - smp_mb__after_clear_bit(); + 
smp_mb__after_atomic(); wake_up_bit(&s->state_bits, RUNNING_MERGE); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 455e64916498..2db768e4553f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2447,7 +2447,7 @@ static void dm_wq_work(struct work_struct *work) static void dm_queue_flush(struct mapped_device *md) { clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); queue_work(md->wq, &md->work); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ad1b9bea446e..2afef4ec9312 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4400,7 +4400,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) * STRIPE_ON_UNPLUG_LIST clear but the stripe * is still in our list */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); /* * STRIPE_ON_RELEASE_LIST could be set here. In that diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c index de02db802ace..e35580618936 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c @@ -399,7 +399,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed) /* clear 'streaming' status bit */ clear_bit(ADAP_STREAMING, &adap->state_bits); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&adap->state_bits, ADAP_STREAMING); skip_feed_stop: @@ -550,7 +550,7 @@ static int dvb_usb_fe_init(struct dvb_frontend *fe) err: if (!adap->suspend_resume_active) { clear_bit(ADAP_INIT, &adap->state_bits); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&adap->state_bits, ADAP_INIT); } @@ -591,7 +591,7 @@ err: if (!adap->suspend_resume_active) { adap->active_fe = -1; clear_bit(ADAP_SLEEP, &adap->state_bits); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&adap->state_bits, ADAP_SLEEP); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9261d5313b5b..dd57c7c5a3da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2781,7 +2781,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) case LOAD_OPEN: netif_tx_start_all_queues(bp->dev); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); break; case LOAD_DIAG: @@ -4939,9 +4939,9 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, u32 verbose) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(flag, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", flag); schedule_delayed_work(&bp->sp_rtnl_task, 0); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a78edaccceee..16391db2e8c9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1858,10 +1858,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) return; #endif - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&bp->cq_spq_left); /* push the change in bp->spq_left and towards the memory */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); @@ -1876,11 +1876,11 @@ void bnx2x_sp_event(struct 
bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) * sp_state is cleared, and this order prevents * races */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); wmb(); clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* schedule the sp task as mcp ack is required */ bnx2x_schedule_sp_task(bp); @@ -5272,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp) __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); /* mark latest Q bit */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* send Q update ramrod for FCoE Q */ rc = bnx2x_queue_state_change(bp, &queue_params); @@ -5500,7 +5500,7 @@ next_spqe: spqe_cnt++; } /* for */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(spqe_cnt, &bp->eq_spq_left); bp->eq_cons = sw_cons; @@ -13869,9 +13869,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { int count = ctl->data.credit.credit_count; - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(count, &bp->cq_spq_left); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } case DRV_CTL_ULP_REGISTER_CMD: { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 31297266b743..d725317c4277 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -258,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /** @@ -2131,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, /* The operation is completed */ clear_bit(p->state, p->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -3576,16 +3576,16 @@ error_exit1: static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) @@ -4200,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp, if (rc) { o->next_state = BNX2X_Q_STATE_MAX; clear_bit(pending_bit, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; } @@ -4288,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, wmb(); clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -5279,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, wmb(); clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -5926,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp, if (rc) { 
o->next_state = BNX2X_F_STATE_MAX; clear_bit(cmd, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 5c523b32db70..f82ac5ac2336 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1626,9 +1626,9 @@ static void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, struct bnx2x_virtf *vf) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, @@ -2960,9 +2960,9 @@ void bnx2x_iov_task(struct work_struct *work) void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(flag, &bp->iov_task_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); } diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 09f3fefcbf9c..4dd48d2fa804 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk) static int cnic_close_prep(struct cnic_sock *csk) { clear_bit(SK_F_CONNECT_START, &csk->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) @@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk) static int cnic_abort_prep(struct cnic_sock *csk) { clear_bit(SK_F_CONNECT_START, &csk->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) msleep(1); @@ -3646,7 +3646,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk) csk_hold(csk); clear_bit(SK_F_INUSE, &csk->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); while (atomic_read(&csk->ref_count) != 1) msleep(1); cnic_cm_cleanup(csk); @@ -4026,7 +4026,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) set_bit(SK_F_HW_ERR, &csk->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(SK_F_OFFLD_SCHED, &csk->flags); cnic_cm_upcall(cp, csk, opcode); break; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 675550fe8ee9..3a77f9ead004 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -249,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) bna_ib_ack(tcb->i_dbell, sent); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); return sent; @@ -1126,7 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work) bnad_txq_cleanup(bnad, tcb); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); } @@ -2992,7 +2992,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) sent = bnad_txcmpl_process(bnad, tcb); if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) bna_ib_ack(tcb->i_dbell, sent); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNAD_TXQ_FREE_SENT, 
&tcb->flags); } else { netif_stop_queue(netdev); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 0fe7ff750d77..05613a85ce61 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -281,7 +281,7 @@ static int cxgb_close(struct net_device *dev) if (adapter->params.stats_update_period && !(adapter->open_device_map & PORT_MASK)) { /* Stop statistics accumulation. */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); spin_lock(&adapter->work_lock); /* sync with update task */ spin_unlock(&adapter->work_lock); cancel_mac_stats_update(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 8b069f96e920..3dfcf600fcc6 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1379,7 +1379,7 @@ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, struct sge_qset *qs = txq_to_qset(q, qid); set_bit(qid, &qs->txq_stopped); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (should_restart_tx(q) && test_and_clear_bit(qid, &qs->txq_stopped)) @@ -1492,7 +1492,7 @@ static void restart_ctrlq(unsigned long data) if (!skb_queue_empty(&q->sendq)) { set_bit(TXQ_CTRL, &qs->txq_stopped); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (should_restart_tx(q) && test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) @@ -1697,7 +1697,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); if (unlikely(q->size - q->in_use < ndesc)) { set_bit(TXQ_OFLD, &qs->txq_stopped); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (should_restart_tx(q) && test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ca95cf2954eb..e249528c8e60 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2031,7 +2031,7 @@ static void sge_rx_timer_cb(unsigned long data) struct sge_fl *fl = s->egr_map[id]; clear_bit(id, s->starving_fl); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (fl_starving(fl)) { rxq = container_of(fl, struct sge_eth_rxq, fl); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 9cfa4b4bb089..9d88c1d50b49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1951,7 +1951,7 @@ static void sge_rx_timer_cb(unsigned long data) struct sge_fl *fl = s->egr_map[id]; clear_bit(id, s->starving_fl); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* * Since we are accessing fl without a lock there's a diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9125d9abf099..d82f092cae90 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1797,9 +1797,9 @@ void stop_gfar(struct net_device *dev) netif_tx_stop_all_queues(dev); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(GFAR_DOWN, &priv->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); disable_napi(priv); @@ -2042,9 +2042,9 @@ int startup_gfar(struct net_device *ndev) gfar_init_tx_rx_base(priv); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(GFAR_DOWN, &priv->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* Start Rx/Tx DMA and enable the interrupts */ gfar_start(priv); diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 861b722c2672..1e526c072a44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4671,7 +4671,7 @@ static void i40e_service_event_complete(struct i40e_pf *pf) BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); /* flush memory to make sure state is correct before next watchog */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__I40E_SERVICE_SCHED, &pf->state); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4c526b7f99f..2fecc2626de5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -376,7 +376,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); /* flush memory to make sure state is correct before next watchdog */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); } @@ -4671,7 +4671,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); @@ -5567,7 +5567,7 @@ static int ixgbe_resume(struct pci_dev *pdev) e_dev_err("Cannot enable PCI device from suspend\n"); return err; } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); @@ -8541,7 +8541,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) e_err(probe, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d0799e8e31e4..de2793b06305 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1668,7 +1668,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -3354,7 +3354,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -3712,7 +3712,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ed88d3913483..e71eae353368 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -543,7 +543,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) * wl1271_ps_elp_wakeup cannot be called concurrently. 
*/ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); ret = wlcore_fw_status(wl, wl->fw_status); if (ret < 0) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 179b8edc2262..53df39a22c8a 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -662,9 +662,9 @@ static void pcifront_do_aer(struct work_struct *data) notify_remote_via_evtchn(pdev->evtchn); /*in case of we lost an aer request in four lines time_window*/ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(_PDEVB_op_active, &pdev->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); schedule_pcifront_aer_op(pdev); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 96a26f454673..cc51f38b116d 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref) clear_bit(IDEV_STOP_PENDING, &idev->flags); clear_bit(IDEV_IO_READY, &idev->flags); clear_bit(IDEV_GONE, &idev->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(IDEV_ALLOCATED, &idev->flags); wake_up(&ihost->eventq); } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index c886ad1c39fb..73ab75ddaf42 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -951,7 +951,7 @@ static int tcm_loop_port_link( struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; atomic_inc(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * Add Linux/SCSI struct scsi_device by HCTL */ @@ -986,7 +986,7 @@ static void tcm_loop_port_unlink( scsi_device_put(sd); atomic_dec(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); } diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fcbe6125b73e..0b79b852f4b2 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -393,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) continue; atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -404,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -990,7 +990,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) * TARGET PORT GROUPS command */ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_lock_bh(&port->sep_alua_lock); @@ -1020,7 +1020,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) spin_lock(&tg_pt_gp->tg_pt_gp_lock); atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* @@ -1054,7 +1054,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); if 
(tg_pt_gp->tg_pt_gp_transition_complete) @@ -1116,7 +1116,7 @@ static int core_alua_do_transition_tg_pt( */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { @@ -1159,7 +1159,7 @@ int core_alua_do_port_transition( spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); /* * For storage objects that are members of the 'default_lu_gp', @@ -1176,7 +1176,7 @@ int core_alua_do_port_transition( rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return rc; } /* @@ -1190,7 +1190,7 @@ int core_alua_do_port_transition( dev = lu_gp_mem->lu_gp_mem_dev; atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lu_gp->lu_gp_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1219,7 +1219,7 @@ int core_alua_do_port_transition( tg_pt_gp->tg_pt_gp_alua_nacl = NULL; } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return @@ -1230,7 +1230,7 @@ int core_alua_do_port_transition( spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); if (rc) break; } @@ -1238,7 +1238,7 @@ int core_alua_do_port_transition( spin_lock(&lu_gp->lu_gp_lock); atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&lu_gp->lu_gp_lock); @@ -1252,7 +1252,7 @@ int core_alua_do_port_transition( } atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return rc; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 65001e133670..72618776ede4 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( continue; atomic_inc(&deve->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&nacl->device_list_lock); return deve; @@ -1392,7 +1392,7 @@ int core_dev_add_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); atomic_inc(&lun->lun_acl_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " @@ -1426,7 +1426,7 @@ int core_dev_del_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_del(&lacl->lacl_list); atomic_dec(&lun->lun_acl_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 9e0232cca92e..7e6b857c6b3f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -323,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err) * Bump the ib_bio_err_cnt and release bio. 
*/ atomic_inc(&ibr->ib_bio_err_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } bio_put(bio); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 3013287a2aaa..df357862286e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { atomic_inc(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); spin_lock_bh(&port->sep_alua_lock); @@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( continue; atomic_inc(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_bh(&port->sep_alua_lock); /* * Grab a configfs group dependency that is released @@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); goto out; } /* @@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( sa_res_key, all_tg_pt, aptpl); if (!pr_reg_atp) { atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_lunacl_undepend_item(deve_tmp); goto out; } @@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&dev->se_port_lock); @@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; } atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) { atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_check_implicit_release( @@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) &tpg->tpg_group.cg_item); atomic_dec(&tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) @@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) if (nacl->dynamic_node_acl) { atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } @@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) &nacl->acl_group.cg_item); atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) @@ -1408,7 +1408,7 @@ static void 
core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) */ if (!lun_acl) { atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } nacl = lun_acl->se_lun_nacl; @@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) &lun_acl->se_lun_group.cg_item); atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static sense_reason_t @@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port( continue; atomic_inc(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(tmp_tpg)) { pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; } @@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port( tmp_tpg, i_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&tmp_tpg->acl_node_lock); @@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port( pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_tpg_undepend_item(tmp_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; @@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port( pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -3168,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, continue; atomic_inc(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(dest_se_tpg)) { pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); atomic_dec(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_put_pr_reg; } @@ -3273,7 +3273,7 @@ after_iport_check: initiator_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&dest_se_tpg->acl_node_lock); @@ -3289,7 +3289,7 @@ after_iport_check: pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_node_acl = NULL; ret = TCM_INVALID_PARAMETER_LIST; goto out; @@ -3314,7 +3314,7 @@ after_iport_check: if (core_scsi3_lunacl_depend_item(dest_se_deve)) { pr_err("core_scsi3_lunacl_depend_item() failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_se_deve = NULL; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out; @@ -3880,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) add_desc_len = 0; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); /* * Determine expected length of $FABRIC_MOD specific @@ -3894,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) " out of buffer: %d\n", 
cmd->data_length); spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } /* @@ -3956,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); /* * Set the ADDITIONAL DESCRIPTOR LENGTH */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d4b98690a736..4badca1cd625 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -736,7 +736,7 @@ void target_qf_do_work(struct work_struct *work) list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec(&dev->dev_qf_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -1148,7 +1148,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Dormant to Active status. */ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, dev->transport->name); @@ -1705,7 +1705,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) return false; case MSG_ORDERED_TAG: atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " " se_ordered_id: %u\n", @@ -1723,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) * For SIMPLE and UNTAGGED Task Attribute commands */ atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } @@ -1828,7 +1828,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { atomic_dec(&dev->simple_cmds); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, @@ -1840,7 +1840,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { atomic_dec(&dev->dev_ordered_sync); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -1899,7 +1899,7 @@ static void transport_handle_queue_full( spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); @@ -2875,7 +2875,7 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { cmd->transport_state |= CMD_T_ABORTED; cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return; } } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 505519b10cb7..101858e245b3 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -162,7 +162,7 @@ int core_scsi3_ua_allocate( spin_unlock_irq(&nacl->device_list_lock); atomic_inc(&deve->ua_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return 0; } 
list_add_tail(&ua->ua_nacl_list, &deve->ua_list); @@ -175,7 +175,7 @@ int core_scsi3_ua_allocate( asc, ascq); atomic_inc(&deve->ua_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return 0; } @@ -190,7 +190,7 @@ void core_scsi3_ua_release_all( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); } @@ -251,7 +251,7 @@ void core_scsi3_ua_for_check_condition( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); @@ -310,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 41fe8a047d37..746ae80b972f 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2041,7 +2041,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, if (found) clear_bit(eol, ldata->read_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); ldata->read_tail += c; if (found) { diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index aa97fd845b4d..4b5b3c2fe328 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -200,7 +200,7 @@ static void dma_tx_callback(void *param) /* clear the bit used to serialize the DMA tx. */ clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* wake up the possible processes. */ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -275,7 +275,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) mxs_auart_dma_tx(s, i); } else { clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } return; } diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index f058c0368d61..819875c7e394 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c @@ -1851,7 +1851,7 @@ static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun) struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); atomic_inc(&tpg->tpg_port_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return 0; } @@ -1861,7 +1861,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg, struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); atomic_dec(&tpg->tpg_port_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int usbg_check_stop_free(struct se_cmd *se_cmd) diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 640fe0173236..f1ec1680e822 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -325,7 +325,7 @@ static void usb_wwan_outdat_callback(struct urb *urb) for (i = 0; i < N_OUT_URB; ++i) { if (portdata->out_urbs[i] == urb) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(i, &portdata->out_busy); break; } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index cf50ce93975b..aeb513108448 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1255,7 +1255,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; - smp_mb__after_atomic_inc(); + 
smp_mb__after_atomic(); match = true; } mutex_unlock(&tpg->tv_tpg_mutex); diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 3bff6b37b472..3651ec801f45 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -139,9 +139,9 @@ void w1_family_get(struct w1_family *f) void __w1_family_get(struct w1_family *f) { - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&f->refcnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } EXPORT_SYMBOL(w1_unregister_family); diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 607e41460c0d..c4a0666de6f5 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -348,9 +348,9 @@ void xen_pcibk_do_op(struct work_struct *data) notify_remote_via_irq(pdev->evtchn_irq); /* Mark that we're done. */ - smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */ + smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ clear_bit(_PDEVF_op_active, &pdev->flags); - smp_mb__after_clear_bit(); /* /before/ final check for work */ + smp_mb__after_atomic(); /* /before/ final check for work */ /* Check to see if the driver domain tried to start another request in * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index c9a24444ec9a..2256e9cceec5 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -279,7 +279,7 @@ static inline void btrfs_inode_block_unlocked_dio(struct inode *inode) static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3955e475ceec..f29a54e454d4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3458,7 +3458,7 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb, static void end_extent_buffer_writeback(struct extent_buffer *eb) { clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f805bc944fa..5a3b8371772e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7126,7 +7126,7 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) * before atomic variable goto zero, we must make sure * dip->errors is perceived to be set. */ - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); } /* if there are more bios still pending for this dio, just exit */ @@ -7306,7 +7306,7 @@ out_err: * before atomic variable goto zero, we must * make sure dip->errors is perceived to be set. 
*/ - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); if (atomic_dec_and_test(&dip->pending_bios)) bio_io_error(dip->orig_bio); @@ -7449,7 +7449,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, return 0; atomic_inc(&inode->i_dio_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * The generic stuff only does filemap_write_and_wait_range, which diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e79ff6b90cb7..f45040a4bb76 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -642,7 +642,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, return -EINVAL; atomic_inc(&root->will_be_snapshoted); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); btrfs_wait_nocow_write(root); ret = btrfs_start_delalloc_inodes(root, 0); diff --git a/fs/buffer.c b/fs/buffer.c index 9ddb9fc7d923..6a8110c03a47 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -77,7 +77,7 @@ EXPORT_SYMBOL(__lock_buffer); void unlock_buffer(struct buffer_head *bh) { clear_bit_unlock(BH_Lock, &bh->b_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&bh->b_state, BH_Lock); } EXPORT_SYMBOL(unlock_buffer); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index f3b84cd9de56..08b3c116915b 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -42,7 +42,7 @@ int ext4_resize_begin(struct super_block *sb) void ext4_resize_end(struct super_block *sb) { clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index aec7f73832f0..c355f7320e44 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -277,7 +277,7 @@ static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holde static void gfs2_holder_wake(struct gfs2_holder *gh) { clear_bit(HIF_WAIT, &gh->gh_iflags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&gh->gh_iflags, HIF_WAIT); } @@ -411,7 +411,7 @@ static void gfs2_demote_wake(struct gfs2_glock *gl) { gl->gl_demote_state = LM_ST_EXCLUSIVE; clear_bit(GLF_DEMOTE, &gl->gl_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&gl->gl_flags, GLF_DEMOTE); } @@ -620,7 +620,7 @@ out: out_sched: clear_bit(GLF_LOCK, &gl->gl_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); gl->gl_lockref.count++; if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) gl->gl_lockref.count--; @@ -628,7 +628,7 @@ out_sched: out_unlock: clear_bit(GLF_LOCK, &gl->gl_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return; } diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 54b66809e818..74d9a3dbf16f 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -221,7 +221,7 @@ static void inode_go_sync(struct gfs2_glock *gl) * Writeback of the data mapping may cause the dirty flag to be set * so we have to clear it again here. 
*/ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(GLF_DIRTY, &gl->gl_flags); } diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index c1eb555dc588..91f274de1246 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -1134,7 +1134,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots, queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0); clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); spin_unlock(&ls->ls_recover_spin); } @@ -1271,7 +1271,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); return 0; diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 7ad4094d68c0..fe7a56fb6084 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -587,7 +587,7 @@ fail: gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); done: clear_bit(JDF_RECOVERY, &jd->jd_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&jd->jd_flags, JDF_RECOVERY); } diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index de25d5577e5d..529d9a9eb897 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -333,7 +333,7 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len) set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); else if (val == 0) { clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); gfs2_glock_thaw(sdp); } else { ret = -EINVAL; @@ -482,7 +482,7 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) rv = jid = -EINVAL; sdp->sd_lockstruct.ls_jid = jid; clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); out: spin_unlock(&sdp->sd_jindex_spin); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 5f26139a165a..6fac74349856 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -43,7 +43,7 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) clear_buffer_uptodate(bh); if (orig_bh) { clear_bit_unlock(BH_Shadow, &orig_bh->b_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&orig_bh->b_state, BH_Shadow); } unlock_buffer(bh); @@ -239,7 +239,7 @@ static int journal_submit_data_buffers(journal_t *journal, spin_lock(&journal->j_list_lock); J_ASSERT(jinode->i_transaction == commit_transaction); clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); } spin_unlock(&journal->j_list_lock); @@ -277,7 +277,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal, } spin_lock(&journal->j_list_lock); clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index d9f3d067cd15..4a3d4ef76127 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2032,9 +2032,9 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry) { put_rpccred(entry->cred); kfree(entry); - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_long_dec(&nfs_access_nr_entries); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static void 
nfs_access_free_list(struct list_head *head) @@ -2082,9 +2082,9 @@ nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc) else { remove_lru_entry: list_del_init(&nfsi->access_cache_inode_lru); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } spin_unlock(&inode->i_lock); } @@ -2232,9 +2232,9 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) nfs_access_add_rbtree(inode, cache); /* Update accounting */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_long_inc(&nfs_access_nr_entries); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* Add inode to global LRU list */ if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) { diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0c438973f3c8..e6f7398d2b3c 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1085,7 +1085,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) trace_nfs_invalidate_mapping_exit(inode, ret); clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(bitlock, NFS_INO_INVALIDATING); out: return ret; diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index efac602edb37..b9c61efe9660 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -789,9 +789,9 @@ static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds) static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NFS4DS_CONNECTING, &ds->ds_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 2349518eef2c..c0583b9bef71 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1140,9 +1140,9 @@ static int nfs4_run_state_manager(void *); static void nfs4_clear_state_manager_bit(struct nfs_client *clp) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); rpc_wake_up(&clp->cl_rpcwaitq); } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 2ffebf2081ce..03ed984ab4d8 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -95,7 +95,7 @@ nfs_iocounter_dec(struct nfs_io_counter *c) { if (atomic_dec_and_test(&c->io_count)) { clear_bit(NFS_IO_INPROGRESS, &c->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&c->flags, NFS_IO_INPROGRESS); } } @@ -193,9 +193,9 @@ void nfs_unlock_request(struct nfs_page *req) printk(KERN_ERR "NFS: Invalid unlock attempted\n"); BUG(); } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(PG_BUSY, &req->wb_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&req->wb_flags, PG_BUSY); } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index cb53d450ae32..fd9536e494bc 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1810,7 +1810,7 @@ static void pnfs_clear_layoutcommitting(struct inode *inode) unsigned long *bitlock = &NFS_I(inode)->flags; clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 023793909778..c3058a076596 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -275,7 
+275,7 @@ pnfs_get_lseg(struct pnfs_layout_segment *lseg) { if (lseg) { atomic_inc(&lseg->pls_refcount); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } return lseg; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 9a3b6a4cd6b9..ffb9459f180b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -405,7 +405,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) nfs_pageio_complete(&pgio); clear_bit_unlock(NFS_INO_FLUSHING, bitlock); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(bitlock, NFS_INO_FLUSHING); if (err < 0) @@ -1458,7 +1458,7 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) static void nfs_commit_clear_lock(struct nfs_inode *nfsi) { clear_bit(NFS_INO_COMMIT, &nfsi->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_bit(&nfsi->flags, NFS_INO_COMMIT); } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 4b826abb1528..45d4e96a6bac 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -460,9 +460,9 @@ static int write_cnodes(struct ubifs_info *c) * important. */ clear_bit(DIRTY_CNODE, &cnode->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(COW_CNODE, &cnode->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); offs += len; dbg_chk_lpt_sz(c, 1, len); cnode = cnode->cnext; diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 52a6559275c4..3600994f8411 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -895,9 +895,9 @@ static int write_index(struct ubifs_info *c) * the reason for the second barrier. */ clear_bit(DIRTY_ZNODE, &znode->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(COW_ZNODE, &znode->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* * We have marked the znode as clean but have not updated the diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 9ae6c34dc191..49673510b484 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. 
*/ static inline void clear_bit(int nr, volatile unsigned long *addr) diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 308a9e22c802..c30266e94806 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -20,7 +20,7 @@ */ #define clear_bit_unlock(nr, addr) \ do { \ - smp_mb__before_clear_bit(); \ + smp_mb__before_atomic(); \ clear_bit(nr, addr); \ } while (0) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c40302f909ce..7cbf837a279c 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -278,7 +278,7 @@ static inline void get_bh(struct buffer_head *bh) static inline void put_bh(struct buffer_head *bh) { - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&bh->b_count); } diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 9f3c275e053e..ec274e0f4ed2 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part) static inline void hd_struct_get(struct hd_struct *part) { atomic_inc(&part->ref); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static inline int hd_struct_try_get(struct hd_struct *part) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c7bfac1c4a7b..157111043281 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -453,7 +453,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t) static inline void tasklet_unlock(struct tasklet_struct *t) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } @@ -501,7 +501,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) static inline void tasklet_disable_nosync(struct tasklet_struct *t) { atomic_inc(&t->count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static inline void tasklet_disable(struct tasklet_struct *t) @@ -513,13 +513,13 @@ static inline void tasklet_disable(struct tasklet_struct *t) static inline void tasklet_enable(struct tasklet_struct *t) { - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&t->count); } static inline void tasklet_hi_enable(struct tasklet_struct *t) { - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&t->count); } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7ed3a3aa6604..616415a4fee4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -493,7 +493,7 @@ static inline void napi_disable(struct napi_struct *n) static inline void napi_enable(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 25f54c79f757..010cde3b44cb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2782,10 +2782,8 @@ static inline bool __must_check current_set_polling_and_test(void) /* * Polling state must be visible before we test NEED_RESCHED, * paired by resched_task() - * - * XXX: assumes set/clear bit are identical barrier wise. 
*/ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return unlikely(tif_need_resched()); } @@ -2803,7 +2801,7 @@ static inline bool __must_check current_clr_polling_and_test(void) * Polling state must be visible before we test NEED_RESCHED, * paired by resched_task() */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return unlikely(tif_need_resched()); } diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 3a847de83fab..ad7dbe2cfecd 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -142,18 +142,18 @@ struct rpc_task_setup { test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_clear_running(t) \ do { \ - smp_mb__before_clear_bit(); \ + smp_mb__before_atomic(); \ clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ - smp_mb__after_clear_bit(); \ + smp_mb__after_atomic(); \ } while (0) #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define rpc_clear_queued(t) \ do { \ - smp_mb__before_clear_bit(); \ + smp_mb__before_atomic(); \ clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ - smp_mb__after_clear_bit(); \ + smp_mb__after_atomic(); \ } while (0) #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 3e5efb2b236e..3876f0f1dfd3 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -379,9 +379,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) static inline void xprt_clear_connecting(struct rpc_xprt *xprt) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static inline int xprt_connecting(struct rpc_xprt *xprt) @@ -411,9 +411,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt) static inline void xprt_clear_binding(struct rpc_xprt *xprt) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_BINDING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 1e98b5530425..6f8ab7da27c4 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) * pairs with task_work_add()->set_notify_resume() after * hlist_add_head(task->task_works); */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); } diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 5679d927562b..624a8a54806d 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1204,7 +1204,7 @@ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) /* put back the conn without restarting its timer */ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) { - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&cp->refcnt); } void ip_vs_conn_put(struct ip_vs_conn *cp); @@ -1408,7 +1408,7 @@ static inline void ip_vs_dest_hold(struct ip_vs_dest *dest) static inline void ip_vs_dest_put(struct ip_vs_dest *dest) { - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&dest->refcnt); } diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 2956c8da1605..1adf62b39b96 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ 
-534,7 +534,7 @@ return_normal: kgdb_info[cpu].exception_state &= ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); kgdb_info[cpu].enter_kgdb--; - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); @@ -662,7 +662,7 @@ kgdb_restore: kgdb_info[cpu].exception_state &= ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); kgdb_info[cpu].enter_kgdb--; - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&masters_in_kgdb); /* Free kgdb_active */ atomic_set(&kgdb_active, -1); diff --git a/kernel/futex.c b/kernel/futex.c index 5f589279e462..b991ec05b8f9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -267,7 +267,7 @@ static inline void futex_get_mm(union futex_key *key) * get_futex_key() implies a full barrier. This is relied upon * as full barrier (B), see the ordering comment above. */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } /* @@ -280,7 +280,7 @@ static inline void hb_waiters_inc(struct futex_hash_bucket *hb) /* * Full barrier (A), see the ordering comment above. */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); #endif } diff --git a/kernel/kmod.c b/kernel/kmod.c index 6b375af4958d..0ac67a5861c5 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -498,7 +498,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth) static void helper_lock(void) { atomic_inc(&running_helpers); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static void helper_unlock(void) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0c47e300210a..88b4a1dcb58c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -387,9 +387,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, } rcu_prepare_for_idle(smp_processor_id()); /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ - smp_mb__before_atomic_inc(); /* See above. */ + smp_mb__before_atomic(); /* See above. */ atomic_inc(&rdtp->dynticks); - smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ + smp_mb__after_atomic(); /* Force ordering with next sojourn. */ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); /* @@ -507,10 +507,10 @@ void rcu_irq_exit(void) static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, int user) { - smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ + smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */ atomic_inc(&rdtp->dynticks); /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ - smp_mb__after_atomic_inc(); /* See above. */ + smp_mb__after_atomic(); /* See above. */ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); rcu_cleanup_after_idle(smp_processor_id()); trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); @@ -635,10 +635,10 @@ void rcu_nmi_enter(void) (atomic_read(&rdtp->dynticks) & 0x1)) return; rdtp->dynticks_nmi_nesting++; - smp_mb__before_atomic_inc(); /* Force delay from prior write. */ + smp_mb__before_atomic(); /* Force delay from prior write. */ atomic_inc(&rdtp->dynticks); /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ - smp_mb__after_atomic_inc(); /* See above. */ + smp_mb__after_atomic(); /* See above. */ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); } @@ -657,9 +657,9 @@ void rcu_nmi_exit(void) --rdtp->dynticks_nmi_nesting != 0) return; /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ - smp_mb__before_atomic_inc(); /* See above. */ + smp_mb__before_atomic(); /* See above. 
*/ atomic_inc(&rdtp->dynticks); - smp_mb__after_atomic_inc(); /* Force delay to next write. */ + smp_mb__after_atomic(); /* Force delay to next write. */ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); } @@ -2790,7 +2790,7 @@ void synchronize_sched_expedited(void) s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { /* ensure test happens before caller kfree */ - smp_mb__before_atomic_inc(); /* ^^^ */ + smp_mb__before_atomic(); /* ^^^ */ atomic_long_inc(&rsp->expedited_workdone1); return; } @@ -2808,7 +2808,7 @@ void synchronize_sched_expedited(void) s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { /* ensure test happens before caller kfree */ - smp_mb__before_atomic_inc(); /* ^^^ */ + smp_mb__before_atomic(); /* ^^^ */ atomic_long_inc(&rsp->expedited_workdone2); return; } @@ -2837,7 +2837,7 @@ void synchronize_sched_expedited(void) s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { /* ensure test happens before caller kfree */ - smp_mb__before_atomic_inc(); /* ^^^ */ + smp_mb__before_atomic(); /* ^^^ */ atomic_long_inc(&rsp->expedited_done_lost); break; } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 962d1d589929..56db2f853e43 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2523,9 +2523,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) /* Record start of fully idle period. */ j = jiffies; ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&rdtp->dynticks_idle); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); } @@ -2590,9 +2590,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) } /* Record end of idle period. */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&rdtp->dynticks_idle); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1)); /* diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 8b836b376d91..746bc9344969 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * do a write memory barrier, and then update the count, to * make sure the vector is visible when count is set. */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&(vec)->count); do_mb = 1; } @@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * the new priority vec. */ if (do_mb) - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * When removing from the vector, we decrement the counter first * do a memory barrier and then clear the mask. */ atomic_dec(&(vec)->count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); cpumask_clear_cpu(cpu, vec->mask); } diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 7d50f794e248..0ffa20ae657b 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit); * * In order for this to function properly, as it uses waitqueue_active() * internally, some kind of memory barrier must be done prior to calling - * this. Typically, this will be smp_mb__after_clear_bit(), but in some + * this. 
Typically, this will be smp_mb__after_atomic(), but in some * cases where bitflags are manipulated non-atomically under a lock, one * may need to use a less regular barrier, such fs/inode.c's smp_mb(), * because spin_unlock() does not guarantee a memory barrier. diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 09d9591b7708..1706cbbdf5f0 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -557,7 +557,7 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync) bit = sync ? BDI_sync_congested : BDI_async_congested; if (test_and_clear_bit(bit, &bdi->state)) atomic_dec(&nr_bdi_congested[sync]); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (waitqueue_active(wqh)) wake_up(wqh); } diff --git a/mm/filemap.c b/mm/filemap.c index a82fbe4c9e8e..c73535c914cc 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -740,7 +740,7 @@ void unlock_page(struct page *page) { VM_BUG_ON_PAGE(!PageLocked(page), page); clear_bit_unlock(PG_locked, &page->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_page(page, PG_locked); } EXPORT_SYMBOL(unlock_page); @@ -757,7 +757,7 @@ void end_page_writeback(struct page *page) if (!test_clear_page_writeback(page)) BUG(); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); wake_up_page(page, PG_writeback); } EXPORT_SYMBOL(end_page_writeback); diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 8c93267ce969..c4e09846d1de 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -252,7 +252,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size) * we need to ensure there's a memory barrier after it. The bit * *must* be set before we do the atomic_inc() on pvcc->inflight. * There's no smp_mb__after_set_bit(), so it's this or abuse - * smp_mb__after_clear_bit(). + * smp_mb__after_atomic(). */ test_and_set_bit(BLOCKED, &pvcc->blocked); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 49774912cb01..74014420b3c7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -45,7 +45,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) return; clear_bit(HCI_INQUIRY, &hdev->flags); - smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); hci_conn_check_pending(hdev); @@ -1768,7 +1768,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) return; - smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); if (!test_bit(HCI_MGMT, &hdev->dev_flags)) diff --git a/net/core/dev.c b/net/core/dev.c index 5b3042e69f85..e14f1cba591a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1326,7 +1326,7 @@ static int __dev_close_many(struct list_head *head) * dev->stop() will invoke napi_disable() on all of it's * napi_struct instances on this device. */ - smp_mb__after_clear_bit(); /* Commit netif_running(). */ + smp_mb__after_atomic(); /* Commit netif_running(). 
*/ } dev_deactivate_many(head); @@ -3343,7 +3343,7 @@ static void net_tx_action(struct softirq_action *h) root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); qdisc_run(q); @@ -3353,7 +3353,7 @@ static void net_tx_action(struct softirq_action *h) &q->state)) { __netif_reschedule(q); } else { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); } @@ -4244,7 +4244,7 @@ void __napi_complete(struct napi_struct *n) BUG_ON(n->gro_list); list_del(&n->poll_list); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 9c3a839322ba..bd0767e6b2b3 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -147,7 +147,7 @@ static void linkwatch_do_dev(struct net_device *dev) * Make sure the above read is complete since it can be * rewritten as soon as we clear the bit below. */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); /* We are about to handle this device, * so new events can be accepted diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 48f424465112..56cd458a1b8c 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { p->dtime = (__u32)jiffies; - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&p->refcnt); } EXPORT_SYMBOL_GPL(inet_putpeer); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 025e25093984..366cf06587b8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1930,10 +1930,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* It is possible TX completion already happened * before we set TSQ_THROTTLED, so we must * test again the condition. 
- * We abuse smp_mb__after_clear_bit() because - * there is no smp_mb__after_set_bit() yet */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (atomic_read(&sk->sk_wmem_alloc) > limit) break; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 75421f2ba8be..1f4f954c4b47 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -914,7 +914,7 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); nf_ct_ext_free(ct); kmem_cache_free(net->ct.nf_conntrack_cachep, ct); - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&net->ct.count); } EXPORT_SYMBOL_GPL(nf_conntrack_free); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index b7ebe23cdedf..d67de453c35a 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -598,7 +598,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, { atomic64_set(&ic->i_ack_next, seq); if (ack_required) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); } } @@ -606,7 +606,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, static u64 rds_ib_get_ack(struct rds_ib_connection *ic) { clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return atomic64_read(&ic->i_ack_next); } diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 45033358358e..aa8bf6786008 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c @@ -429,7 +429,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq, { atomic64_set(&ic->i_ack_next, seq); if (ack_required) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); } } @@ -437,7 +437,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq, static u64 rds_iw_get_ack(struct rds_iw_connection *ic) { clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return atomic64_read(&ic->i_ack_next); } diff --git a/net/rds/send.c b/net/rds/send.c index a82fb660ec00..23718160d71e 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn) static void release_in_xmit(struct rds_connection *conn) { clear_bit(RDS_IN_XMIT, &conn->c_flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* * We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. 
We don't want to walk @@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack, /* order flag updates with spin locks */ if (!list_empty(&list)) - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); spin_unlock_irqrestore(&conn->c_lock, flags); @@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) } /* order flag updates with the rs lock */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); spin_unlock_irqrestore(&rs->rs_lock, flags); diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 81cf5a4c5e40..53b17ca0dff5 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -93,7 +93,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, rm->m_ack_seq = tc->t_last_sent_nxt + sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); tc->t_last_expected_una = rm->m_ack_seq + 1; diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 5285ead196c0..247e973544bf 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -296,7 +296,7 @@ static void rpcauth_unhash_cred_locked(struct rpc_cred *cred) { hlist_del_rcu(&cred->cr_hash); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 36e431ee1c90..b6e440baccc3 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -143,7 +143,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) gss_get_ctx(ctx); rcu_assign_pointer(gss_cred->gc_ctx, ctx); set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); } diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 3513d559bc45..9761a0da964d 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -244,10 +244,10 @@ void xprt_free_bc_request(struct rpc_rqst *req) dprintk("RPC: free backchannel req=%p\n", req); req->rq_connect_cookie = xprt->connect_cookie - 1; - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (!xprt_need_to_requeue(xprt)) { /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index d173f79947c6..89d051de6b3e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -230,9 +230,9 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) { xprt->snd_task = NULL; if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_LOCKED, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } else queue_work(rpciod_workqueue, &xprt->task_cleanup); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 25a3dcf15cae..402a7e9a16b7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -893,11 +893,11 @@ static void xs_close(struct rpc_xprt *xprt) xs_reset_transport(transport); xprt->reestablish_timeout = 0; - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); xprt_disconnect_done(xprt); } @@ -1497,12 +1497,12 @@ static void 
xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void xs_sock_mark_closed(struct rpc_xprt *xprt) @@ -1556,10 +1556,10 @@ static void xs_tcp_state_change(struct sock *sk) xprt->connect_cookie++; xprt->reestablish_timeout = 0; set_bit(XPRT_CLOSING, &xprt->state); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTED, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); break; case TCP_CLOSE_WAIT: @@ -1578,9 +1578,9 @@ static void xs_tcp_state_change(struct sock *sk) case TCP_LAST_ACK: set_bit(XPRT_CLOSING, &xprt->state); xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTED, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); break; case TCP_CLOSE: xs_tcp_cancel_linger_timeout(xprt); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index bb7e8ba821f4..749f80c21e22 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1207,7 +1207,7 @@ restart: sk->sk_state = TCP_ESTABLISHED; sock_hold(newsk); - smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ + smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk; unix_state_unlock(sk); diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c index 8546711d12f9..70951fd9b354 100644 --- a/sound/pci/bt87x.c +++ b/sound/pci/bt87x.c @@ -443,7 +443,7 @@ static int snd_bt87x_pcm_open(struct snd_pcm_substream *substream) _error: clear_bit(0, &chip->opened); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return err; } @@ -458,7 +458,7 @@ static int snd_bt87x_close(struct snd_pcm_substream *substream) chip->substream = NULL; clear_bit(0, &chip->opened); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } -- cgit v1.2.3 From 073a77d03ee88ae3a5504b3f73632841a55d60a1 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 15 Apr 2014 19:47:38 +0800 Subject: regulator: tps65217: Remove *rdev[] from struct tps65217 Now this driver uses devm_regulator_register() so we don't need to save rdev pointer to tps->rdev[i] for cleanup. 
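For reference, the probe loop that makes the saved pointer unnecessary looks roughly like the sketch below. It is a simplified illustration, not the driver verbatim: the regulators[] descriptor table is assumed to be defined by the driver itself, and the config fields shown are only the usual ones.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/tps65217.h>

/* Placeholder for the driver's real regulator_desc table. */
static const struct regulator_desc regulators[TPS65217_NUM_REGULATOR];

static int tps65217_regulator_probe(struct platform_device *pdev)
{
	struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int i;

	for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
		config.dev = tps->dev;
		config.driver_data = tps;
		config.regmap = tps->regmap;

		/* Managed registration: the regulator core unregisters
		 * this regulator on driver detach, so nothing has to be
		 * stored in tps->rdev[i] for a later cleanup pass. */
		rdev = devm_regulator_register(&pdev->dev,
					       &regulators[i], &config);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);
	}

	return 0;
}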
Signed-off-by: Axel Lin Acked-by: Lee Jones Signed-off-by: Mark Brown --- drivers/regulator/tps65217-regulator.c | 3 --- include/linux/mfd/tps65217.h | 1 - 2 files changed, 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index 10b78d2b766a..8482f6ba08a1 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c @@ -257,9 +257,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev) pdev->name); return PTR_ERR(rdev); } - - /* Save regulator for cleanup */ - tps->rdev[i] = rdev; } return 0; } diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index 54b5458ec084..95d6938737fd 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -254,7 +254,6 @@ struct tps65217 { struct tps65217_board *pdata; unsigned long id; struct regulator_desc desc[TPS65217_NUM_REGULATOR]; - struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; struct regmap *regmap; }; -- cgit v1.2.3 From 290414499cf94284a97cc3c33214d13ccfcd896a Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Wed, 16 Apr 2014 16:12:28 -0700 Subject: regulator: tps65090: Allow setting the overcurrent wait time The tps65090 regulator allows you to specify how long you want it to wait before detecting an overcurrent condition. Allow specifying that through the device tree (or through platform data). Signed-off-by: Doug Anderson Signed-off-by: Simon Glass Signed-off-by: Michael Spang Signed-off-by: Sean Paul Signed-off-by: Mark Brown --- .../devicetree/bindings/regulator/tps65090.txt | 4 ++ drivers/regulator/tps65090-regulator.c | 56 ++++++++++++++++++++++ include/linux/mfd/tps65090.h | 5 ++ 3 files changed, 65 insertions(+) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt index 313a60ba61d8..340980239ea9 100644 --- a/Documentation/devicetree/bindings/regulator/tps65090.txt +++ b/Documentation/devicetree/bindings/regulator/tps65090.txt @@ -21,6 +21,10 @@ Optional properties: number should be provided. If it is externally controlled and no GPIO entry then driver will just configure this rails as external control and will not provide any enable/disable APIs. +- ti,overcurrent-wait: This is applicable to FET registers, which have a + poorly defined "overcurrent wait" field. If this property is present it + should be between 0 - 3. If this property isn't present we won't touch the + "overcurrent wait" field and we'll leave it to the BIOS/EC to deal with. Each regulator is defined using the standard binding for regulators. diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c index 2e92ef68574d..ca04e9f010e1 100644 --- a/drivers/regulator/tps65090-regulator.c +++ b/drivers/regulator/tps65090-regulator.c @@ -28,15 +28,58 @@ #include #include +#define CTRL_WT_BIT 2 /* Regulator wait time 0 bit */ + +#define MAX_OVERCURRENT_WAIT 3 /* Overcurrent wait must be <= this */ + +/** + * struct tps65090_regulator - Per-regulator data for a tps65090 regulator + * + * @dev: Pointer to our device. + * @desc: The struct regulator_desc for the regulator. + * @rdev: The struct regulator_dev for the regulator. + * @overcurrent_wait_valid: True if overcurrent_wait is valid. + * @overcurrent_wait: For FETs, the value to put in the WTFET bitfield. 
+ */ + struct tps65090_regulator { struct device *dev; struct regulator_desc *desc; struct regulator_dev *rdev; + bool overcurrent_wait_valid; + int overcurrent_wait; }; static struct regulator_ops tps65090_ext_control_ops = { }; +/** + * tps65090_reg_set_overcurrent_wait - Setup overcurrent wait + * + * This will set the overcurrent wait time based on what's in the regulator + * info. + * + * @ri: Overall regulator data + * @rdev: Regulator device + * + * Return: 0 if no error, non-zero if there was an error writing the register. + */ +static int tps65090_reg_set_overcurrent_wait(struct tps65090_regulator *ri, + struct regulator_dev *rdev) +{ + int ret; + + ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + MAX_OVERCURRENT_WAIT << CTRL_WT_BIT, + ri->overcurrent_wait << CTRL_WT_BIT); + if (ret) { + dev_err(&rdev->dev, "Error updating overcurrent wait %#x\n", + rdev->desc->enable_reg); + } + + return ret; +} + static struct regulator_ops tps65090_reg_contol_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -209,6 +252,11 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data( rpdata->gpio = of_get_named_gpio(np, "dcdc-ext-control-gpios", 0); + if (of_property_read_u32(tps65090_matches[idx].of_node, + "ti,overcurrent-wait", + &rpdata->overcurrent_wait) == 0) + rpdata->overcurrent_wait_valid = true; + tps65090_pdata->reg_pdata[idx] = rpdata; } return tps65090_pdata; @@ -258,6 +306,8 @@ static int tps65090_regulator_probe(struct platform_device *pdev) ri = &pmic[num]; ri->dev = &pdev->dev; ri->desc = &tps65090_regulator_desc[num]; + ri->overcurrent_wait_valid = tps_pdata->overcurrent_wait_valid; + ri->overcurrent_wait = tps_pdata->overcurrent_wait; /* * TPS5090 DCDC support the control from external digital input. @@ -299,6 +349,12 @@ static int tps65090_regulator_probe(struct platform_device *pdev) } ri->rdev = rdev; + if (ri->overcurrent_wait_valid) { + ret = tps65090_reg_set_overcurrent_wait(ri, rdev); + if (ret < 0) + return ret; + } + /* Enable external control if it is require */ if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data && tps_pdata->enable_ext_control) { diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h index 3f43069413e7..f25adfa97c73 100644 --- a/include/linux/mfd/tps65090.h +++ b/include/linux/mfd/tps65090.h @@ -78,11 +78,16 @@ struct tps65090 { * DCDC1, DCDC2 and DCDC3. * @gpio: Gpio number if external control is enabled and controlled through * gpio. + * @overcurrent_wait_valid: True if the overcurrent_wait should be applied. + * @overcurrent_wait: Value to set as the overcurrent wait time. This is the + * actual bitfield value, not a time in ms (valid value are 0 - 3). */ struct tps65090_regulator_plat_data { struct regulator_init_data *reg_init_data; bool enable_ext_control; int gpio; + bool overcurrent_wait_valid; + int overcurrent_wait; }; struct tps65090_platform_data { -- cgit v1.2.3 From 3ac170376f2c5123414e0267aa0f9cf218965e24 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 17 Apr 2014 11:40:11 +0200 Subject: regmap: add reg_read/reg_write callbacks to regmap_bus struct Some busses do not support sending/receiving multiple registers in one go. Such kind of busses just unpack the registers that have been previously packed by the regmap core or pack registers that will be later unpacked by the core code. Add reg_write and reg_read callbacks in order to optimize access through this kind of busses. 
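A minimal sketch of a consumer of the new hooks: a bus that can only move one register per transaction fills in .reg_read/.reg_write and leaves .read/.write unset, so the core routes accesses through the per-register path added in this patch instead of packing raw buffers. Here foo_bus_dev and foo_bus_xfer() are hypothetical stand-ins for such a bus driver's own primitives.

#include <linux/types.h>
#include <linux/regmap.h>

struct foo_bus_dev;				/* hypothetical bus handle */
int foo_bus_xfer(struct foo_bus_dev *foo, unsigned int reg,
		 unsigned int *val, bool is_write);	/* hypothetical primitive */

static int foo_bus_reg_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	struct foo_bus_dev *foo = context;

	/* One register in, one value out - no buffer packing needed. */
	return foo_bus_xfer(foo, reg, val, false);
}

static int foo_bus_reg_write(void *context, unsigned int reg,
			     unsigned int val)
{
	struct foo_bus_dev *foo = context;

	return foo_bus_xfer(foo, reg, &val, true);
}

static const struct regmap_bus foo_regmap_bus = {
	/* No .read/.write: the core falls back to the per-register
	 * callbacks because bus->read/bus->write are NULL. */
	.reg_read	= foo_bus_reg_read,
	.reg_write	= foo_bus_reg_write,
};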
Signed-off-by: Boris BREZILLON Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 26 ++++++++++++++++++++++++++ include/linux/regmap.h | 6 ++++++ 2 files changed, 32 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 63e30ef096e2..2209de0ceabc 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -35,10 +35,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change); +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val); static int _regmap_bus_read(void *context, unsigned int reg, unsigned int *val); static int _regmap_bus_formatted_write(void *context, unsigned int reg, unsigned int val); +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val); static int _regmap_bus_raw_write(void *context, unsigned int reg, unsigned int val); @@ -493,6 +497,12 @@ struct regmap *regmap_init(struct device *dev, map->reg_read = config->reg_read; map->reg_write = config->reg_write; + map->defer_caching = false; + goto skip_format_initialization; + } else if (!bus->read || !bus->write) { + map->reg_read = _regmap_bus_reg_read; + map->reg_write = _regmap_bus_reg_write; + map->defer_caching = false; goto skip_format_initialization; } else { @@ -1284,6 +1294,14 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, return ret; } +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct regmap *map = context; + + return map->bus->reg_write(map->bus_context, reg, val); +} + static int _regmap_bus_raw_write(void *context, unsigned int reg, unsigned int val) { @@ -1925,6 +1943,14 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return ret; } +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct regmap *map = context; + + return map->bus->reg_read(map->bus_context, reg, val); +} + static int _regmap_bus_read(void *context, unsigned int reg, unsigned int *val) { diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 85691b9b4fa7..7b0e4b425cdf 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -276,6 +276,10 @@ typedef int (*regmap_hw_async_write)(void *context, typedef int (*regmap_hw_read)(void *context, const void *reg_buf, size_t reg_size, void *val_buf, size_t val_size); +typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg, + unsigned int *val); +typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, + unsigned int val); typedef struct regmap_async *(*regmap_hw_async_alloc)(void); typedef void (*regmap_hw_free_context)(void *context); @@ -309,7 +313,9 @@ struct regmap_bus { regmap_hw_write write; regmap_hw_gather_write gather_write; regmap_hw_async_write async_write; + regmap_hw_reg_write reg_write; regmap_hw_read read; + regmap_hw_reg_read reg_read; regmap_hw_free_context free_context; regmap_hw_async_alloc async_alloc; u8 read_flag_mask; -- cgit v1.2.3 From e4fcb1d6148284a10c314fce2a488cf19ce886f6 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Wed, 16 Apr 2014 10:01:37 +0100 Subject: mfd: arizona: Factor out read of device tree GPIOs This patch factors out the reading of GPIOs for the Arizona devices into a helper function. 
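Both call sites in the diff below pass mandatory = true, so a missing or malformed wlf,reset or wlf,ldoena property is now reported; an optional GPIO elsewhere in the Arizona code would pass false and stay silent. A hedged sketch of such a caller follows; the property name and the request flags are invented for illustration and are not something this patch adds.

#include <linux/gpio.h>
#include <linux/mfd/arizona/core.h>

static int arizona_example_get_gpio(struct arizona *arizona)
{
	int gpio;

	/* Missing/malformed property: the helper returns 0 and, since
	 * mandatory is false, logs nothing. */
	gpio = arizona_of_get_named_gpio(arizona, "wlf,example-gpio", false);
	if (!gpio)
		return 0;

	return devm_gpio_request_one(arizona->dev, gpio,
				     GPIOF_OUT_INIT_LOW, "arizona example");
}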
Signed-off-by: Charles Keepax Acked-by: Lee Jones Signed-off-by: Mark Brown --- drivers/mfd/arizona-core.c | 31 ++++++++++++++++++++++--------- include/linux/mfd/arizona/core.h | 3 +++ 2 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 1c3ae57082ed..37b5e1447d02 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -508,19 +508,32 @@ int arizona_of_get_type(struct device *dev) } EXPORT_SYMBOL_GPL(arizona_of_get_type); +int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop, + bool mandatory) +{ + int gpio; + + gpio = of_get_named_gpio(arizona->dev->of_node, prop, 0); + if (gpio < 0) { + if (mandatory) + dev_err(arizona->dev, + "Mandatory DT gpio %s missing/malformed: %d\n", + prop, gpio); + + gpio = 0; + } + + return gpio; +} +EXPORT_SYMBOL_GPL(arizona_of_get_named_gpio); + static int arizona_of_get_core_pdata(struct arizona *arizona) { + struct arizona_pdata *pdata = &arizona->pdata; int ret, i; - arizona->pdata.reset = of_get_named_gpio(arizona->dev->of_node, - "wlf,reset", 0); - if (arizona->pdata.reset < 0) - arizona->pdata.reset = 0; - - arizona->pdata.ldoena = of_get_named_gpio(arizona->dev->of_node, - "wlf,ldoena", 0); - if (arizona->pdata.ldoena < 0) - arizona->pdata.ldoena = 0; + pdata->reset = arizona_of_get_named_gpio(arizona, "wlf,reset", true); + pdata->ldoena = arizona_of_get_named_gpio(arizona, "wlf,ldoena", true); ret = of_property_read_u32_array(arizona->dev->of_node, "wlf,gpio-defaults", diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 5cf8b91ce996..6d9371f88875 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -124,4 +124,7 @@ int wm5102_patch(struct arizona *arizona); int wm5110_patch(struct arizona *arizona); int wm8997_patch(struct arizona *arizona); +extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop, + bool mandatory); + #endif -- cgit v1.2.3 From 4525beeb9aadbb9e1cb3e9e135f4371553f26a70 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 16 Apr 2014 15:20:44 -0500 Subject: usb: phy: rename usb_nop_xceiv to usb_phy_generic no functional changes, just renaming the function in order to make it slightly clearer what it should be used for, also matching the driver name. 
Signed-off-by: Felipe Balbi --- arch/arm/mach-omap2/usb-host.c | 8 +++--- drivers/usb/dwc3/dwc3-exynos.c | 6 ++--- drivers/usb/dwc3/dwc3-pci.c | 6 ++--- drivers/usb/musb/am35x.c | 4 +-- drivers/usb/musb/blackfin.c | 4 +-- drivers/usb/musb/da8xx.c | 4 +-- drivers/usb/musb/davinci.c | 6 ++--- drivers/usb/musb/tusb6010.c | 6 ++--- drivers/usb/phy/phy-am335x.c | 2 +- drivers/usb/phy/phy-generic.c | 50 +++++++++++++++++------------------ drivers/usb/phy/phy-generic.h | 6 ++--- drivers/usb/phy/phy-keystone.c | 2 +- include/linux/usb/usb_phy_gen_xceiv.h | 10 +++---- 13 files changed, 57 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index 10855eb4ccc1..ab983cdd3edf 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -349,7 +349,7 @@ static struct fixed_voltage_config hsusb_reg_config = { /* .init_data filled later */ }; -static const char *nop_name = "usb_phy_gen_xceiv"; /* NOP PHY driver */ +static const char *nop_name = "usb_phy_generic"; /* NOP PHY driver */ static const char *reg_name = "reg-fixed-voltage"; /* Regulator driver */ /** @@ -435,7 +435,7 @@ int usbhs_init_phys(struct usbhs_phy_data *phy, int num_phys) struct platform_device *pdev; char *phy_id; struct platform_device_info pdevinfo; - struct usb_phy_gen_xceiv_platform_data nop_pdata; + struct usb_phy_generic_platform_data nop_pdata; for (i = 0; i < num_phys; i++) { @@ -469,8 +469,8 @@ int usbhs_init_phys(struct usbhs_phy_data *phy, int num_phys) pdevinfo.id = phy->port; pdevinfo.data = &nop_pdata; pdevinfo.size_data = - sizeof(struct usb_phy_gen_xceiv_platform_data); - scnprintf(phy_id, MAX_STR, "usb_phy_gen_xceiv.%d", + sizeof(struct usb_phy_generic_platform_data); + scnprintf(phy_id, MAX_STR, "usb_phy_generic.%d", phy->port); pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) { diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 28c8ad79f5e6..821cc59e6e1d 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -38,13 +38,13 @@ struct dwc3_exynos { static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos) { - struct usb_phy_gen_xceiv_platform_data pdata; + struct usb_phy_generic_platform_data pdata; struct platform_device *pdev; int ret; memset(&pdata, 0x00, sizeof(pdata)); - pdev = platform_device_alloc("usb_phy_gen_xceiv", PLATFORM_DEVID_AUTO); + pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO); if (!pdev) return -ENOMEM; @@ -56,7 +56,7 @@ static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos) if (ret) goto err1; - pdev = platform_device_alloc("usb_phy_gen_xceiv", PLATFORM_DEVID_AUTO); + pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO); if (!pdev) { ret = -ENOMEM; goto err1; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index f393c183cc69..8b162f0e293c 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -40,13 +40,13 @@ struct dwc3_pci { static int dwc3_pci_register_phys(struct dwc3_pci *glue) { - struct usb_phy_gen_xceiv_platform_data pdata; + struct usb_phy_generic_platform_data pdata; struct platform_device *pdev; int ret; memset(&pdata, 0x00, sizeof(pdata)); - pdev = platform_device_alloc("usb_phy_gen_xceiv", 0); + pdev = platform_device_alloc("usb_phy_generic", 0); if (!pdev) return -ENOMEM; @@ -58,7 +58,7 @@ static int dwc3_pci_register_phys(struct dwc3_pci *glue) if (ret) goto err1; - pdev = 
platform_device_alloc("usb_phy_gen_xceiv", 1); + pdev = platform_device_alloc("usb_phy_generic", 1); if (!pdev) { ret = -ENOMEM; goto err1; diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index b3aa0184af9a..77ed66427969 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c @@ -360,7 +360,7 @@ static int am35x_musb_init(struct musb *musb) if (!rev) return -ENODEV; - usb_nop_xceiv_register(); + usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) return -EPROBE_DEFER; @@ -402,7 +402,7 @@ static int am35x_musb_exit(struct musb *musb) data->set_phy_power(0); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); return 0; } diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 796677fa9a15..607f3ae04591 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -401,7 +401,7 @@ static int bfin_musb_init(struct musb *musb) } gpio_direction_output(musb->config->gpio_vrsel, 0); - usb_nop_xceiv_register(); + usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { gpio_free(musb->config->gpio_vrsel); @@ -426,7 +426,7 @@ static int bfin_musb_exit(struct musb *musb) gpio_free(musb->config->gpio_vrsel); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); return 0; } diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index e3486de71995..bcdce8e64670 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -418,7 +418,7 @@ static int da8xx_musb_init(struct musb *musb) if (!rev) goto fail; - usb_nop_xceiv_register(); + usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { ret = -EPROBE_DEFER; @@ -453,7 +453,7 @@ static int da8xx_musb_exit(struct musb *musb) phy_off(); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); return 0; } diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index c259dac9d056..c0e07eddb079 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -381,7 +381,7 @@ static int davinci_musb_init(struct musb *musb) u32 revision; int ret = -ENODEV; - usb_nop_xceiv_register(); + usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { ret = -EPROBE_DEFER; @@ -439,7 +439,7 @@ static int davinci_musb_init(struct musb *musb) fail: usb_put_phy(musb->xceiv); unregister: - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); return ret; } @@ -487,7 +487,7 @@ static int davinci_musb_exit(struct musb *musb) phy_off(); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); return 0; } diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 4e9fb1d08698..0c0f5ee1e3f1 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -1065,7 +1065,7 @@ static int tusb_musb_init(struct musb *musb) void __iomem *sync = NULL; int ret; - usb_nop_xceiv_register(); + usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) return -EPROBE_DEFER; @@ -1117,7 +1117,7 @@ done: iounmap(sync); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + usb_phy_generic_unregister(); } return ret; } @@ -1133,7 +1133,7 @@ static int tusb_musb_exit(struct musb *musb) iounmap(musb->sync_va); usb_put_phy(musb->xceiv); - usb_nop_xceiv_unregister(); + 
usb_phy_generic_unregister(); return 0; } diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 12fc3468a01e..bb866e466051 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -13,7 +13,7 @@ #include "phy-generic.h" struct am335x_phy { - struct usb_phy_gen_xceiv usb_phy_gen; + struct usb_phy_generic usb_phy_gen; struct phy_control *phy_ctrl; int id; }; diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 95e70bc384c2..e76ca4ca3a8a 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -43,32 +43,32 @@ static struct platform_device *pd; -void usb_nop_xceiv_register(void) +void usb_phy_generic_register(void) { if (pd) return; - pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); + pd = platform_device_register_simple("usb_phy_generic", -1, NULL, 0); if (IS_ERR(pd)) { pr_err("Unable to register generic usb transceiver\n"); pd = NULL; return; } } -EXPORT_SYMBOL_GPL(usb_nop_xceiv_register); +EXPORT_SYMBOL_GPL(usb_phy_generic_register); -void usb_nop_xceiv_unregister(void) +void usb_phy_generic_unregister(void) { platform_device_unregister(pd); pd = NULL; } -EXPORT_SYMBOL_GPL(usb_nop_xceiv_unregister); +EXPORT_SYMBOL_GPL(usb_phy_generic_unregister); static int nop_set_suspend(struct usb_phy *x, int suspend) { return 0; } -static void nop_reset_set(struct usb_phy_gen_xceiv *nop, int asserted) +static void nop_reset_set(struct usb_phy_generic *nop, int asserted) { int value; @@ -87,7 +87,7 @@ static void nop_reset_set(struct usb_phy_gen_xceiv *nop, int asserted) int usb_gen_phy_init(struct usb_phy *phy) { - struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev); + struct usb_phy_generic *nop = dev_get_drvdata(phy->dev); if (!IS_ERR(nop->vcc)) { if (regulator_enable(nop->vcc)) @@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(usb_gen_phy_init); void usb_gen_phy_shutdown(struct usb_phy *phy) { - struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev); + struct usb_phy_generic *nop = dev_get_drvdata(phy->dev); /* Assert RESET */ nop_reset_set(nop, 1); @@ -150,8 +150,8 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host) return 0; } -int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, - struct usb_phy_gen_xceiv_platform_data *pdata) +int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop, + struct usb_phy_generic_platform_data *pdata) { enum usb_phy_type type = USB_PHY_TYPE_USB2; int err; @@ -245,10 +245,10 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, } EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy); -static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) +static int usb_phy_generic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct usb_phy_gen_xceiv *nop; + struct usb_phy_generic *nop; int err; nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); @@ -274,9 +274,9 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) return 0; } -static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) +static int usb_phy_generic_remove(struct platform_device *pdev) { - struct usb_phy_gen_xceiv *nop = platform_get_drvdata(pdev); + struct usb_phy_generic *nop = platform_get_drvdata(pdev); usb_remove_phy(&nop->phy); @@ -290,29 +290,29 @@ static const struct of_device_id nop_xceiv_dt_ids[] = { MODULE_DEVICE_TABLE(of, nop_xceiv_dt_ids); -static struct platform_driver usb_phy_gen_xceiv_driver = { - .probe = usb_phy_gen_xceiv_probe, - .remove = 
usb_phy_gen_xceiv_remove, +static struct platform_driver usb_phy_generic_driver = { + .probe = usb_phy_generic_probe, + .remove = usb_phy_generic_remove, .driver = { - .name = "usb_phy_gen_xceiv", + .name = "usb_phy_generic", .owner = THIS_MODULE, .of_match_table = nop_xceiv_dt_ids, }, }; -static int __init usb_phy_gen_xceiv_init(void) +static int __init usb_phy_generic_init(void) { - return platform_driver_register(&usb_phy_gen_xceiv_driver); + return platform_driver_register(&usb_phy_generic_driver); } -subsys_initcall(usb_phy_gen_xceiv_init); +subsys_initcall(usb_phy_generic_init); -static void __exit usb_phy_gen_xceiv_exit(void) +static void __exit usb_phy_generic_exit(void) { - platform_driver_unregister(&usb_phy_gen_xceiv_driver); + platform_driver_unregister(&usb_phy_generic_driver); } -module_exit(usb_phy_gen_xceiv_exit); +module_exit(usb_phy_generic_exit); -MODULE_ALIAS("platform:usb_phy_gen_xceiv"); +MODULE_ALIAS("platform:usb_phy_generic"); MODULE_AUTHOR("Texas Instruments Inc"); MODULE_DESCRIPTION("NOP USB Transceiver driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h index 38a81f307b82..f32450ada12d 100644 --- a/drivers/usb/phy/phy-generic.h +++ b/drivers/usb/phy/phy-generic.h @@ -3,7 +3,7 @@ #include -struct usb_phy_gen_xceiv { +struct usb_phy_generic { struct usb_phy phy; struct device *dev; struct clk *clk; @@ -15,7 +15,7 @@ struct usb_phy_gen_xceiv { int usb_gen_phy_init(struct usb_phy *phy); void usb_gen_phy_shutdown(struct usb_phy *phy); -int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, - struct usb_phy_gen_xceiv_platform_data *pdata); +int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop, + struct usb_phy_generic_platform_data *pdata); #endif diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c index d762003896c0..2404c442c302 100644 --- a/drivers/usb/phy/phy-keystone.c +++ b/drivers/usb/phy/phy-keystone.c @@ -35,7 +35,7 @@ #define PHY_REF_SSP_EN BIT(29) struct keystone_usbphy { - struct usb_phy_gen_xceiv usb_phy_gen; + struct usb_phy_generic usb_phy_gen; void __iomem *phy_ctrl; }; diff --git a/include/linux/usb/usb_phy_gen_xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h index cc8d818a83be..c00176d48625 100644 --- a/include/linux/usb/usb_phy_gen_xceiv.h +++ b/include/linux/usb/usb_phy_gen_xceiv.h @@ -3,7 +3,7 @@ #include -struct usb_phy_gen_xceiv_platform_data { +struct usb_phy_generic_platform_data { enum usb_phy_type type; unsigned long clk_rate; @@ -15,14 +15,14 @@ struct usb_phy_gen_xceiv_platform_data { #if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) /* sometimes transceivers are accessed only through e.g. ULPI */ -extern void usb_nop_xceiv_register(void); -extern void usb_nop_xceiv_unregister(void); +extern void usb_phy_generic_register(void); +extern void usb_phy_generic_unregister(void); #else -static inline void usb_nop_xceiv_register(void) +static inline void usb_phy_generic_register(void) { } -static inline void usb_nop_xceiv_unregister(void) +static inline void usb_phy_generic_unregister(void) { } #endif -- cgit v1.2.3 From d7078df6be6e9e5e3ac354859f5b8d60114391b4 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 16 Apr 2014 15:28:32 -0500 Subject: usb: phy: rename to now that all functions match the driver name, the only missing piece is to rename the header file itself. 
Signed-off-by: Felipe Balbi --- arch/arm/mach-omap2/board-omap3beagle.c | 1 - arch/arm/mach-omap2/usb-host.c | 2 +- drivers/usb/dwc3/dwc3-exynos.c | 2 +- drivers/usb/dwc3/dwc3-pci.c | 2 +- drivers/usb/musb/am35x.c | 2 +- drivers/usb/musb/blackfin.c | 2 +- drivers/usb/musb/da8xx.c | 2 +- drivers/usb/musb/davinci.c | 2 +- drivers/usb/musb/musb_dsps.c | 2 +- drivers/usb/musb/tusb6010.c | 2 +- drivers/usb/phy/phy-am335x.c | 2 +- drivers/usb/phy/phy-generic.c | 2 +- drivers/usb/phy/phy-generic.h | 2 +- drivers/usb/phy/phy-keystone.c | 2 +- include/linux/usb/usb_phy_gen_xceiv.h | 30 ------------------------------ include/linux/usb/usb_phy_generic.h | 30 ++++++++++++++++++++++++++++++ 16 files changed, 43 insertions(+), 44 deletions(-) delete mode 100644 include/linux/usb/usb_phy_gen_xceiv.h create mode 100644 include/linux/usb/usb_phy_generic.h (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index d6ed819ff15c..660bfc5a70d7 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index ab983cdd3edf..745367c0c2bb 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include "soc.h" #include "omap_device.h" diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 821cc59e6e1d..ed22d722884e 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 8b162f0e293c..1ed95e0386eb 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -23,7 +23,7 @@ #include #include -#include +#include /* FIXME define these in */ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index 77ed66427969..044cd824c70d 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include "musb_core.h" diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 607f3ae04591..c9992a2eaaa8 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index bcdce8e64670..a0dabb05de76 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index c0e07eddb079..737035457858 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 3372ded5def7..18882924d9d5 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 0c0f5ee1e3f1..8d4a8194c8f2 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -24,7 +24,7 @@ #include 
#include #include -#include +#include #include "musb_core.h" diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index bb866e466051..585e50cb1980 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index e76ca4ca3a8a..2c49cd8f6d25 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h index f32450ada12d..d8feacc0b7fb 100644 --- a/drivers/usb/phy/phy-generic.h +++ b/drivers/usb/phy/phy-generic.h @@ -1,7 +1,7 @@ #ifndef _PHY_GENERIC_H_ #define _PHY_GENERIC_H_ -#include +#include struct usb_phy_generic { struct usb_phy phy; diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c index 2404c442c302..f4d722de912b 100644 --- a/drivers/usb/phy/phy-keystone.c +++ b/drivers/usb/phy/phy-keystone.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/include/linux/usb/usb_phy_gen_xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h deleted file mode 100644 index c00176d48625..000000000000 --- a/include/linux/usb/usb_phy_gen_xceiv.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __LINUX_USB_NOP_XCEIV_H -#define __LINUX_USB_NOP_XCEIV_H - -#include - -struct usb_phy_generic_platform_data { - enum usb_phy_type type; - unsigned long clk_rate; - - /* if set fails with -EPROBE_DEFER if can't get regulator */ - unsigned int needs_vcc:1; - unsigned int needs_reset:1; /* deprecated */ - int gpio_reset; -}; - -#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) -/* sometimes transceivers are accessed only through e.g. ULPI */ -extern void usb_phy_generic_register(void); -extern void usb_phy_generic_unregister(void); -#else -static inline void usb_phy_generic_register(void) -{ -} - -static inline void usb_phy_generic_unregister(void) -{ -} -#endif - -#endif /* __LINUX_USB_NOP_XCEIV_H */ diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h new file mode 100644 index 000000000000..c00176d48625 --- /dev/null +++ b/include/linux/usb/usb_phy_generic.h @@ -0,0 +1,30 @@ +#ifndef __LINUX_USB_NOP_XCEIV_H +#define __LINUX_USB_NOP_XCEIV_H + +#include + +struct usb_phy_generic_platform_data { + enum usb_phy_type type; + unsigned long clk_rate; + + /* if set fails with -EPROBE_DEFER if can't get regulator */ + unsigned int needs_vcc:1; + unsigned int needs_reset:1; /* deprecated */ + int gpio_reset; +}; + +#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) +/* sometimes transceivers are accessed only through e.g. ULPI */ +extern void usb_phy_generic_register(void); +extern void usb_phy_generic_unregister(void); +#else +static inline void usb_phy_generic_register(void) +{ +} + +static inline void usb_phy_generic_unregister(void) +{ +} +#endif + +#endif /* __LINUX_USB_NOP_XCEIV_H */ -- cgit v1.2.3 From 2f36ff6915c6c00df8b9962d9c6c7992befcf8ce Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 16 Apr 2014 16:16:33 -0500 Subject: usb: phy: generic: allow multiples calls to usb_phy_generic_register() it's now very easy to return a platform_device pointer and have the caller pass it as argument when calling usb_phy_generic_unregister(). 
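In short, the new calling convention looks like this (illustrative sketch only; the example_glue structure and functions are hypothetical, the real glue-driver conversions follow in the diff below): keep the platform_device returned by usb_phy_generic_register() and hand it back when tearing down.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>

struct example_glue {
        struct platform_device *phy;
};

static int example_glue_probe(struct example_glue *glue)
{
        glue->phy = usb_phy_generic_register();
        if (IS_ERR(glue->phy))
                return PTR_ERR(glue->phy);

        return 0;
}

static void example_glue_remove(struct example_glue *glue)
{
        /* unregister exactly the device this glue instance created */
        usb_phy_generic_unregister(glue->phy);
}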
Signed-off-by: Felipe Balbi --- drivers/usb/musb/am35x.c | 12 +++++++++--- drivers/usb/musb/blackfin.c | 10 ++++++++-- drivers/usb/musb/da8xx.c | 14 +++++++++++--- drivers/usb/musb/tusb6010.c | 3 ++- drivers/usb/phy/phy-generic.c | 19 +++++-------------- include/linux/usb/usb_phy_generic.h | 9 +++++---- 6 files changed, 40 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index 05459b56b2a8..0a34dd859555 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c @@ -85,6 +85,7 @@ struct am35x_glue { struct device *dev; struct platform_device *musb; + struct platform_device *phy; struct clk *phy_clk; struct clk *clk; }; @@ -503,7 +504,9 @@ static int am35x_probe(struct platform_device *pdev) pdata->platform_ops = &am35x_ops; - usb_phy_generic_register(); + glue->phy = usb_phy_generic_register(); + if (IS_ERR(glue->phy)) + goto err7; platform_set_drvdata(pdev, glue); pinfo = am35x_dev_info; @@ -517,11 +520,14 @@ static int am35x_probe(struct platform_device *pdev) if (IS_ERR(musb)) { ret = PTR_ERR(musb); dev_err(&pdev->dev, "failed to register musb device: %d\n", ret); - goto err7; + goto err8; } return 0; +err8: + usb_phy_generic_unregister(glue->phy); + err7: clk_disable(clk); @@ -546,7 +552,7 @@ static int am35x_remove(struct platform_device *pdev) struct am35x_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); - usb_phy_generic_unregister(); + usb_phy_generic_unregister(glue->phy); clk_disable(glue->clk); clk_disable(glue->phy_clk); clk_put(glue->clk); diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 53acffe9a858..d40d5f0b5528 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -29,6 +29,7 @@ struct bfin_glue { struct device *dev; struct platform_device *musb; + struct platform_device *phy; }; #define glue_to_musb(g) platform_get_drvdata(g->musb) @@ -475,7 +476,9 @@ static int bfin_probe(struct platform_device *pdev) pdata->platform_ops = &bfin_ops; - usb_phy_generic_register(); + glue->phy = usb_phy_generic_register(); + if (IS_ERR(glue->phy)) + goto err2; platform_set_drvdata(pdev, glue); memset(musb_resources, 0x00, sizeof(*musb_resources) * @@ -513,6 +516,9 @@ static int bfin_probe(struct platform_device *pdev) return 0; err3: + usb_phy_generic_unregister(glue->phy); + +err2: platform_device_put(musb); err1: @@ -527,7 +533,7 @@ static int bfin_remove(struct platform_device *pdev) struct bfin_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); - usb_phy_generic_unregister(); + usb_phy_generic_unregister(glue->phy); kfree(glue); return 0; diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 024751f9b31d..058775e647ad 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -85,6 +85,7 @@ struct da8xx_glue { struct device *dev; struct platform_device *musb; + struct platform_device *phy; struct clk *clk; }; @@ -510,7 +511,11 @@ static int da8xx_probe(struct platform_device *pdev) pdata->platform_ops = &da8xx_ops; - usb_phy_generic_register(); + glue->phy = usb_phy_generic_register(); + if (IS_ERR(glue->phy)) { + ret = PTR_ERR(glue->phy); + goto err5; + } platform_set_drvdata(pdev, glue); memset(musb_resources, 0x00, sizeof(*musb_resources) * @@ -537,11 +542,14 @@ static int da8xx_probe(struct platform_device *pdev) if (IS_ERR(musb)) { ret = PTR_ERR(musb); dev_err(&pdev->dev, "failed to register musb device: %d\n", ret); - goto err5; + goto err6; } return 0; 
+err6: + usb_phy_generic_unregister(glue->phy); + err5: clk_disable(clk); @@ -560,7 +568,7 @@ static int da8xx_remove(struct platform_device *pdev) struct da8xx_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); - usb_phy_generic_unregister(); + usb_phy_generic_unregister(glue->phy); clk_disable(glue->clk); clk_put(glue->clk); kfree(glue); diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index e1da199c6f21..f38a8dbd6075 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -31,6 +31,7 @@ struct tusb6010_glue { struct device *dev; struct platform_device *musb; + struct platform_device *phy; }; static void tusb_musb_set_vbus(struct musb *musb, int is_on); @@ -1222,7 +1223,7 @@ static int tusb_remove(struct platform_device *pdev) struct tusb6010_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); - usb_phy_generic_unregister(); + usb_phy_generic_unregister(glue->phy); kfree(glue); return 0; diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 2c49cd8f6d25..7594e5069ae5 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -41,25 +41,16 @@ #include "phy-generic.h" -static struct platform_device *pd; - -void usb_phy_generic_register(void) +struct platform_device *usb_phy_generic_register(void) { - if (pd) - return; - pd = platform_device_register_simple("usb_phy_generic", -1, NULL, 0); - if (IS_ERR(pd)) { - pr_err("Unable to register generic usb transceiver\n"); - pd = NULL; - return; - } + return platform_device_register_simple("usb_phy_generic", + PLATFORM_DEVID_AUTO, NULL, 0); } EXPORT_SYMBOL_GPL(usb_phy_generic_register); -void usb_phy_generic_unregister(void) +void usb_phy_generic_unregister(struct platform_device *pdev) { - platform_device_unregister(pd); - pd = NULL; + platform_device_unregister(pdev); } EXPORT_SYMBOL_GPL(usb_phy_generic_unregister); diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h index c00176d48625..8346bcc50c2f 100644 --- a/include/linux/usb/usb_phy_generic.h +++ b/include/linux/usb/usb_phy_generic.h @@ -15,14 +15,15 @@ struct usb_phy_generic_platform_data { #if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) /* sometimes transceivers are accessed only through e.g. ULPI */ -extern void usb_phy_generic_register(void); -extern void usb_phy_generic_unregister(void); +extern struct platform_device *usb_phy_generic_register(void); +extern void usb_phy_generic_unregister(struct platform_device *); #else -static inline void usb_phy_generic_register(void) +static inline struct platform_device *usb_phy_generic_register(void) { + return NULL; } -static inline void usb_phy_generic_unregister(void) +static inline void usb_phy_generic_unregister(struct platform_device *pdev) { } #endif -- cgit v1.2.3 From dca769bd5a76e9e634cc36987760306846153cac Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Mon, 21 Apr 2014 10:50:35 -0500 Subject: usb: phy: generic: switch over to IS_ENABLED() when checking if our generic PHY is enabled, it's a lot easier to use IS_ENABLED() instead of manually checking for it. While at that, also remove the bogus defined(MODULE) at the end of the line. 
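The semantics relied on here, shown with placeholder names (CONFIG_FOO and FOO_AVAILABLE are not real symbols): IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in (=y) or modular (=m) and to 0 otherwise, so a single test replaces the hand-rolled defined() chain.

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)      /* true for CONFIG_FOO=y and CONFIG_FOO=m */
#define FOO_AVAILABLE   1
#else
#define FOO_AVAILABLE   0
#endif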
Signed-off-by: Felipe Balbi --- include/linux/usb/usb_phy_generic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h index 8346bcc50c2f..68adae83affc 100644 --- a/include/linux/usb/usb_phy_generic.h +++ b/include/linux/usb/usb_phy_generic.h @@ -13,7 +13,7 @@ struct usb_phy_generic_platform_data { int gpio_reset; }; -#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) +#if IS_ENABLED(CONFIG_NOP_USB_XCEIV) /* sometimes transceivers are accessed only through e.g. ULPI */ extern struct platform_device *usb_phy_generic_register(void); extern void usb_phy_generic_unregister(struct platform_device *); -- cgit v1.2.3 From e6853aafd4339dbf2992957ff2616ef7164bc9d4 Mon Sep 17 00:00:00 2001 From: David Ung Date: Wed, 26 Mar 2014 15:35:37 -0700 Subject: video: Check EDID for HDMI connection Check EDID Vendor Specific Data Block bytes to see if the connection is HDMI and set FB_MISC_HDMI. Signed-off-by: David Ung Signed-off-by: Christopher Freeman Signed-off-by: Tomi Valkeinen --- drivers/video/fbdev/core/fbmon.c | 9 ++++++++- include/linux/fb.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index c204ebe6187e..5b0e313849bd 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1012,13 +1012,20 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) while (pos < edid[2]) { u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7; pr_debug("Data block %u of %u bytes\n", type, len); - if (type == 2) + if (type == 2) { for (i = pos; i < pos + len; i++) { u8 idx = edid[pos + i] & 0x7f; svd[svd_n++] = idx; pr_debug("N%sative mode #%d\n", edid[pos + i] & 0x80 ? "" : "on-n", idx); } + } else if (type == 3 && len >= 3) { + /* Check Vendor Specific Data Block. For HDMI, + it is always 00-0C-03 for HDMI Licensing, LLC. */ + if (edid[pos + 1] == 3 && edid[pos + 2] == 0xc && + edid[pos + 3] == 0) + specs->misc |= FB_MISC_HDMI; + } pos += len + 1; } diff --git a/include/linux/fb.h b/include/linux/fb.h index fe6ac956550e..506242979eea 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -47,6 +47,7 @@ struct device_node; #define FB_MISC_PRIM_COLOR 1 #define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ +#define FB_MISC_HDMI 4 struct fb_chroma { __u32 redx; /* in fraction of 1024 */ __u32 greenx; -- cgit v1.2.3 From dfeec843fb237d73947e818f961e8d6f0df22b01 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 1 Jan 2014 16:09:21 +0100 Subject: KVM: add kvm_is_error_gpa() helper It's quite common (in the s390 guest access code) to test if a guest physical address points to a valid guest memory area or not. So add a simple helper function in common code, since this might be of interest for other architectures as well. 
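A hypothetical caller of the new helper, roughly the shape such a check takes in guest access code; the function name and the -EFAULT policy are invented for this example and are not part of the patch.

#include <linux/errno.h>
#include <linux/kvm_host.h>

static int example_check_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        /* fail early if no memslot backs this guest physical address */
        if (kvm_is_error_gpa(vcpu->kvm, gpa))
                return -EFAULT;

        return 0;
}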
Signed-off-by: Heiko Carstens Reviewed-by: Thomas Huth Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- include/linux/kvm_host.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7d21cf9f4380..471d1400c4ac 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -879,6 +879,13 @@ static inline hpa_t pfn_to_hpa(pfn_t pfn) return (hpa_t)pfn << PAGE_SHIFT; } +static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) +{ + unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + + return kvm_is_error_hva(hva); +} + static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) { set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); -- cgit v1.2.3 From 8df4053f0532df8fe47d0434af51676b0fa65491 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 14 Apr 2014 14:41:56 +0300 Subject: platform_data: edma: Be precise with the paRAM struct The edmacc_param struct should follow the layout of the paRAM area in the HW. Be explicit on the size of the fields (u32) and also mark the struct as packed to avoid any padding on non 32bit architectures. Signed-off-by: Peter Ujfalusi Acked-by: Joel Fernandes Reviewed-and-Tested-by: Joel Fernandes Signed-off-by: Vinod Koul --- include/linux/platform_data/edma.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index f50821cb64be..923f8a3e4ce0 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -43,15 +43,15 @@ /* PaRAM slots are laid out like this */ struct edmacc_param { - unsigned int opt; - unsigned int src; - unsigned int a_b_cnt; - unsigned int dst; - unsigned int src_dst_bidx; - unsigned int link_bcntrld; - unsigned int src_dst_cidx; - unsigned int ccnt; -}; + u32 opt; + u32 src; + u32 a_b_cnt; + u32 dst; + u32 src_dst_bidx; + u32 link_bcntrld; + u32 src_dst_cidx; + u32 ccnt; +} __packed; /* fields in edmacc_param.opt */ #define SAM BIT(0) -- cgit v1.2.3 From c04ae71c9c264312a6f57d2665a79f7bbccf8758 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 8 Apr 2014 17:33:19 -0700 Subject: sched_clock: Remove deprecated setup_sched_clock() API Remove the 32-bit only setup_sched_clock() API now that all users have been converted to the 64-bit friendly sched_clock_register(). 
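A migration sketch for any remaining out-of-tree user of the removed 32-bit API: register a u64-returning read function with sched_clock_register() instead. The timer base, the 0x10 counter offset and the 24 MHz rate are hypothetical, and the counter is assumed to be ioremapped elsewhere and 32 bits wide.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_timer_base;        /* mapped during early timer init */

static u64 notrace example_sched_clock_read(void)
{
        return readl_relaxed(example_timer_base + 0x10);  /* free-running counter */
}

static void __init example_timer_init(void)
{
        /* was: setup_sched_clock(example_read_u32, 32, 24000000); */
        sched_clock_register(example_sched_clock_read, 32, 24000000);
}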
Signed-off-by: Stephen Boyd Signed-off-by: John Stultz --- include/linux/sched_clock.h | 1 - kernel/time/sched_clock.c | 13 ------------- 2 files changed, 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index cddf0c2940b6..efa931c5cef1 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -14,7 +14,6 @@ extern void sched_clock_postinit(void); static inline void sched_clock_postinit(void) { } #endif -extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); extern void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate); diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 4d23dc4d8139..445106d2c729 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -49,13 +49,6 @@ static u64 notrace jiffy_sched_clock_read(void) return (u64)(jiffies - INITIAL_JIFFIES); } -static u32 __read_mostly (*read_sched_clock_32)(void); - -static u64 notrace read_sched_clock_32_wrapper(void) -{ - return read_sched_clock_32(); -} - static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) @@ -176,12 +169,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits, pr_debug("Registered %pF as sched_clock source\n", read); } -void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) -{ - read_sched_clock_32 = read; - sched_clock_register(read_sched_clock_32_wrapper, bits, rate); -} - void __init sched_clock_postinit(void) { /* -- cgit v1.2.3 From a6c39cb4f71e61aff19d07e2d0b26bb6e3548fae Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 22 Apr 2014 15:09:05 -0600 Subject: fs/bio: remove bs paramater in biovec_create_pool bs is no longer used in biovec_create_pool since 9f060e2231ca96 ("block: Convert integrity to bvec_alloc_bs()") Signed-off-by: Fabian Frederick Cc: Jens Axboe Signed-off-by: Jens Axboe --- fs/bio-integrity.c | 2 +- fs/bio.c | 4 ++-- include/linux/bio.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 1c2ce0c87711..9e241063a616 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -617,7 +617,7 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size) if (!bs->bio_integrity_pool) return -1; - bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); + bs->bvec_integrity_pool = biovec_create_pool(pool_size); if (!bs->bvec_integrity_pool) { mempool_destroy(bs->bio_integrity_pool); return -1; diff --git a/fs/bio.c b/fs/bio.c index 4c9c5095bacb..ca55d37436d6 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1861,7 +1861,7 @@ EXPORT_SYMBOL_GPL(bio_trim); * create memory pools for biovec's in a bio_set. * use the global biovec slabs created for general use. 
*/ -mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries) +mempool_t *biovec_create_pool(int pool_entries) { struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; @@ -1924,7 +1924,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) if (!bs->bio_pool) goto bad; - bs->bvec_pool = biovec_create_pool(bs, pool_size); + bs->bvec_pool = biovec_create_pool(pool_size); if (!bs->bvec_pool) goto bad; diff --git a/include/linux/bio.h b/include/linux/bio.h index bba550826921..5a645769f020 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); -extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries); +extern mempool_t *biovec_create_pool(int pool_entries); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); -- cgit v1.2.3 From 603fb42a66499ab353466c7afa3d38beea20a8a9 Mon Sep 17 00:00:00 2001 From: Sebastian Capella Date: Tue, 25 Mar 2014 01:20:29 +0100 Subject: ARM: 8011/1: ARM hibernation / suspend-to-disk Enable hibernation for ARM architectures and provide ARM architecture specific calls used during hibernation. The swsusp hibernation framework depends on the platform first having functional suspend/resume. Then, in order to enable hibernation on a given platform, a platform_hibernation_ops structure may need to be registered with the system in order to save/restore any SoC-specific / cpu specific state needing (re)init over a suspend-to-disk/resume-from-disk cycle. For example: - "secure" SoCs that have different sets of control registers and/or different CR reg access patterns. - SoCs with L2 caches as the activation sequence there is SoC-dependent; a full off-on cycle for L2 is not done by the hibernation support code. - SoCs requiring steps on wakeup _before_ the "generic" parts done by cpu_suspend / cpu_resume can work correctly. - SoCs having persistent state which is maintained during suspend and resume, but will be lost during the power off cycle after suspend-to-disk. This is a rebase/rework of Frank Hofmann's v5 hibernation patchset. Acked-by: Russ Dill Cc: "Rafael J. 
Wysocki" Signed-off-by: Sebastian Capella Acked-by: Pavel Machek Reviewed-by: Lorenzo Pieralisi [fixed duplicate virt_to_pfn() definition --rmk] Signed-off-by: Russell King --- arch/arm/Kconfig | 5 +++ arch/arm/kernel/Makefile | 1 + arch/arm/kernel/hibernate.c | 107 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/suspend.h | 2 + 4 files changed, 115 insertions(+) create mode 100644 arch/arm/kernel/hibernate.c (limited to 'include/linux') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ab438cb5af55..58506175a3ea 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2294,6 +2294,11 @@ config ARCH_SUSPEND_POSSIBLE config ARM_CPU_SUSPEND def_bool PM_SLEEP +config ARCH_HIBERNATION_POSSIBLE + bool + depends on MMU + default y if ARCH_SUSPEND_POSSIBLE + endmenu source "net/Kconfig" diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a766bcbaf8ad..10f0464206a2 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o +obj-$(CONFIG_HIBERNATION) += hibernate.o obj-$(CONFIG_SMP) += smp.o ifdef CONFIG_MMU obj-$(CONFIG_SMP) += smp_tlb.o diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c new file mode 100644 index 000000000000..bb8b79648643 --- /dev/null +++ b/arch/arm/kernel/hibernate.c @@ -0,0 +1,107 @@ +/* + * Hibernation support specific for ARM + * + * Derived from work on ARM hibernation support by: + * + * Ubuntu project, hibernation support for mach-dove + * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) + * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) + * https://lkml.org/lkml/2010/6/18/4 + * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html + * https://patchwork.kernel.org/patch/96442/ + * + * Copyright (C) 2006 Rafael J. Wysocki + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include +#include +#include +#include +#include +#include + +extern const void __nosave_begin, __nosave_end; + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); + unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); + + return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); +} + +void notrace save_processor_state(void) +{ + WARN_ON(num_online_cpus() != 1); + local_fiq_disable(); +} + +void notrace restore_processor_state(void) +{ + local_fiq_enable(); +} + +/* + * Snapshot kernel memory and reset the system. + * + * swsusp_save() is executed in the suspend finisher so that the CPU + * context pointer and memory are part of the saved image, which is + * required by the resume kernel image to restart execution from + * swsusp_arch_suspend(). + * + * soft_restart is not technically needed, but is used to get success + * returned from cpu_suspend. + * + * When soft reboot completes, the hibernation snapshot is written out. + */ +static int notrace arch_save_image(unsigned long unused) +{ + int ret; + + ret = swsusp_save(); + if (ret == 0) + soft_restart(virt_to_phys(cpu_resume)); + return ret; +} + +/* + * Save the current CPU state before suspend / poweroff. + */ +int notrace swsusp_arch_suspend(void) +{ + return cpu_suspend(0, arch_save_image); +} + +/* + * Restore page contents for physical pages that were in use during loading + * hibernation image. 
Switch to idmap_pgd so the physical page tables + * are overwritten with the same contents. + */ +static void notrace arch_restore_image(void *unused) +{ + struct pbe *pbe; + + cpu_switch_mm(idmap_pgd, &init_mm); + for (pbe = restore_pblist; pbe; pbe = pbe->next) + copy_page(pbe->orig_address, pbe->address); + + soft_restart(virt_to_phys(cpu_resume)); +} + +static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; + +/* + * Resume from the hibernation image. + * Due to the kernel heap / data restore, stack contents change underneath + * and that would make function calls impossible; switch to a temporary + * stack within the nosave region to avoid that problem. + */ +int swsusp_arch_resume(void) +{ + extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); + call_with_stack(arch_restore_image, 0, + resume_stack + ARRAY_SIZE(resume_stack)); + return 0; +} diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f73cabf59012..38bbf95109da 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -320,6 +320,8 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); extern bool system_entering_hibernation(void); +asmlinkage int swsusp_save(void); +extern struct pbe *restore_pblist; #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} -- cgit v1.2.3 From c42ba72ec3a7a1b6aa30122931f1f4b91b601c31 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Wed, 16 Apr 2014 16:12:27 -0700 Subject: mfd: tps65090: Stop caching most registers Nearly all of the registers in tps65090 combine control bits and status bits. Turn off caching of all registers except the select few that can be cached. In order to avoid adding more duplicate #defines, we also move some register offset definitions to the mfd driver (and resolve inconsistent names). 
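What the regcache change means for consumers, shown with a hypothetical helper (not code from the patch): because a status register such as TPS65090_REG_CG_STATUS1 is now treated as volatile, every regmap_read() of it reaches the hardware instead of the cache, so callers always see live status bits.

#include <linux/mfd/tps65090.h>
#include <linux/regmap.h>

static int example_read_charge_status(struct tps65090 *tps, unsigned int *status)
{
        /* CG_STATUS1 is volatile, so this always goes to the chip */
        return regmap_read(tps->rmap, TPS65090_REG_CG_STATUS1, status);
}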
Signed-off-by: Doug Anderson Acked-by: Mark Brown Signed-off-by: Lee Jones --- drivers/mfd/tps65090.c | 27 ++++++++++++++------------- drivers/power/tps65090-charger.c | 11 ----------- include/linux/mfd/tps65090.h | 14 ++++++++++++++ 3 files changed, 28 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c index c3cddb4c3a1a..1c3e6e2efe41 100644 --- a/drivers/mfd/tps65090.c +++ b/drivers/mfd/tps65090.c @@ -32,14 +32,6 @@ #define NUM_INT_REG 2 #define TOTAL_NUM_REG 0x18 -/* interrupt status registers */ -#define TPS65090_INT_STS 0x0 -#define TPS65090_INT_STS2 0x1 - -/* interrupt mask registers */ -#define TPS65090_INT_MSK 0x2 -#define TPS65090_INT_MSK2 0x3 - #define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1 #define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2 #define TPS65090_INT1_MASK_BAT_STATUS_CHANGE 3 @@ -144,17 +136,26 @@ static struct regmap_irq_chip tps65090_irq_chip = { .irqs = tps65090_irqs, .num_irqs = ARRAY_SIZE(tps65090_irqs), .num_regs = NUM_INT_REG, - .status_base = TPS65090_INT_STS, - .mask_base = TPS65090_INT_MSK, + .status_base = TPS65090_REG_INTR_STS, + .mask_base = TPS65090_REG_INTR_MASK, .mask_invert = true, }; static bool is_volatile_reg(struct device *dev, unsigned int reg) { - if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS2)) - return true; - else + /* Nearly all registers have status bits mixed in, except a few */ + switch (reg) { + case TPS65090_REG_INTR_MASK: + case TPS65090_REG_INTR_MASK2: + case TPS65090_REG_CG_CTRL0: + case TPS65090_REG_CG_CTRL1: + case TPS65090_REG_CG_CTRL2: + case TPS65090_REG_CG_CTRL3: + case TPS65090_REG_CG_CTRL4: + case TPS65090_REG_CG_CTRL5: return false; + } + return true; } static const struct regmap_config tps65090_regmap_config = { diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c index 8fc9d6df87f6..1685f63b9e5d 100644 --- a/drivers/power/tps65090-charger.c +++ b/drivers/power/tps65090-charger.c @@ -28,17 +28,6 @@ #include -#define TPS65090_REG_INTR_STS 0x00 -#define TPS65090_REG_INTR_MASK 0x02 -#define TPS65090_REG_CG_CTRL0 0x04 -#define TPS65090_REG_CG_CTRL1 0x05 -#define TPS65090_REG_CG_CTRL2 0x06 -#define TPS65090_REG_CG_CTRL3 0x07 -#define TPS65090_REG_CG_CTRL4 0x08 -#define TPS65090_REG_CG_CTRL5 0x09 -#define TPS65090_REG_CG_STATUS1 0x0a -#define TPS65090_REG_CG_STATUS2 0x0b - #define TPS65090_CHARGER_ENABLE BIT(0) #define TPS65090_VACG BIT(1) #define TPS65090_NOITERM BIT(5) diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h index 3f43069413e7..45f0f9d2ed25 100644 --- a/include/linux/mfd/tps65090.h +++ b/include/linux/mfd/tps65090.h @@ -64,6 +64,20 @@ enum { TPS65090_REGULATOR_MAX, }; +/* Register addresses */ +#define TPS65090_REG_INTR_STS 0x00 +#define TPS65090_REG_INTR_STS2 0x01 +#define TPS65090_REG_INTR_MASK 0x02 +#define TPS65090_REG_INTR_MASK2 0x03 +#define TPS65090_REG_CG_CTRL0 0x04 +#define TPS65090_REG_CG_CTRL1 0x05 +#define TPS65090_REG_CG_CTRL2 0x06 +#define TPS65090_REG_CG_CTRL3 0x07 +#define TPS65090_REG_CG_CTRL4 0x08 +#define TPS65090_REG_CG_CTRL5 0x09 +#define TPS65090_REG_CG_STATUS1 0x0a +#define TPS65090_REG_CG_STATUS2 0x0b + struct tps65090 { struct device *dev; struct regmap *rmap; -- cgit v1.2.3 From 575343d161d75dc1516f53436b9eb47d04eda938 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:13 +0200 Subject: mfd: max14577: Add muic prefix to regmap config Add muic prefix to regmap config to differentiate between another regmap config for MAX77836 PMIC node. 
Additionally remove unused symbols: MAX14577_REG_INVALID and max14577_irq_source. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- drivers/mfd/max14577.c | 9 +++++---- include/linux/mfd/max14577-private.h | 4 +--- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 5f13cefe8def..d180fae8e317 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -37,7 +37,7 @@ static struct mfd_cell max14577_devs[] = { { .name = "max14577-charger", }, }; -static bool max14577_volatile_reg(struct device *dev, unsigned int reg) +static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case MAX14577_REG_INT1 ... MAX14577_REG_STATUS3: @@ -48,10 +48,10 @@ static bool max14577_volatile_reg(struct device *dev, unsigned int reg) return false; } -static const struct regmap_config max14577_regmap_config = { +static const struct regmap_config max14577_muic_regmap_config = { .reg_bits = 8, .val_bits = 8, - .volatile_reg = max14577_volatile_reg, + .volatile_reg = max14577_muic_volatile_reg, .max_register = MAX14577_REG_END, }; @@ -113,7 +113,8 @@ static int max14577_i2c_probe(struct i2c_client *i2c, max14577->i2c = i2c; max14577->irq = i2c->irq; - max14577->regmap = devm_regmap_init_i2c(i2c, &max14577_regmap_config); + max14577->regmap = devm_regmap_init_i2c(i2c, + &max14577_muic_regmap_config); if (IS_ERR(max14577->regmap)) { ret = PTR_ERR(max14577->regmap); dev_err(max14577->dev, "Failed to allocate register map: %d\n", diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index c9b332fb0d5d..97b78d94f92f 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -22,9 +22,7 @@ #include #include -#define MAX14577_REG_INVALID (0xff) - -/* Slave addr = 0x4A: Interrupt */ +/* Slave addr = 0x4A: MUIC and Charger */ enum max14577_reg { MAX14577_REG_DEVICEID = 0x00, MAX14577_REG_INT1 = 0x01, -- cgit v1.2.3 From eccb80cc22354a12255c2579247a92a30a4c881b Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:14 +0200 Subject: mfd: max14577: Add detection of device type This patch continues the preparation for adding support for MAX77836 device to existing max14577 driver. Add enum for types of devices supported by this driver. The device type will be detected by matching of_device_id, or i2c_device_id as a fallback. The patch also moves to separate function the code related to displaying DeviceID register values. 
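The detection pattern in generic form, as a sketch with placeholder names (the example_* identifiers and "vendor,chip-*" compatibles are invented): the matched of_device_id carries the device type in .data, and the i2c_device_id .driver_data serves as the non-DT fallback.

#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_device.h>

enum example_device_type {
        EXAMPLE_TYPE_UNKNOWN = 0,
        EXAMPLE_TYPE_A,
        EXAMPLE_TYPE_B,
};

static const struct of_device_id example_dt_match[] = {
        { .compatible = "vendor,chip-a", .data = (void *)(unsigned long)EXAMPLE_TYPE_A, },
        { .compatible = "vendor,chip-b", .data = (void *)(unsigned long)EXAMPLE_TYPE_B, },
        { },
};

static int example_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
        enum example_device_type type;

        if (i2c->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(example_dt_match, &i2c->dev);

                type = of_id ? (unsigned long)of_id->data : EXAMPLE_TYPE_UNKNOWN;
        } else {
                type = id->driver_data;         /* non-DT fallback */
        }

        dev_info(&i2c->dev, "device type: %d\n", type);
        return 0;
}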
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- drivers/mfd/max14577.c | 64 +++++++++++++++++++++++++----------- include/linux/mfd/max14577-private.h | 12 ++++--- 2 files changed, 53 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index d180fae8e317..0e07ed74ab41 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +38,14 @@ static struct mfd_cell max14577_devs[] = { { .name = "max14577-charger", }, }; +static struct of_device_id max14577_dt_match[] = { + { + .compatible = "maxim,max14577", + .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, + }, + {}, +}; + static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -83,13 +92,34 @@ static const struct regmap_irq_chip max14577_irq_chip = { .num_irqs = ARRAY_SIZE(max14577_irqs), }; +static void max14577_print_dev_type(struct max14577 *max14577) +{ + u8 reg_data, vendor_id, device_id; + int ret; + + ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID, + ®_data); + if (ret) { + dev_err(max14577->dev, + "Failed to read DEVICEID register: %d\n", ret); + return; + } + + vendor_id = ((reg_data & DEVID_VENDORID_MASK) >> + DEVID_VENDORID_SHIFT); + device_id = ((reg_data & DEVID_DEVICEID_MASK) >> + DEVID_DEVICEID_SHIFT); + + dev_info(max14577->dev, "Device type: %u (ID: 0x%x, vendor: 0x%x)\n", + max14577->dev_type, device_id, vendor_id); +} + static int max14577_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max14577 *max14577; struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev); struct device_node *np = i2c->dev.of_node; - u8 reg_data; int ret = 0; if (np) { @@ -122,19 +152,17 @@ static int max14577_i2c_probe(struct i2c_client *i2c, return ret; } - ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID, - ®_data); - if (ret) { - dev_err(max14577->dev, "Device not found on this channel: %d\n", - ret); - return ret; + if (np) { + const struct of_device_id *of_id; + + of_id = of_match_device(max14577_dt_match, &i2c->dev); + if (of_id) + max14577->dev_type = (unsigned int)of_id->data; + } else { + max14577->dev_type = id->driver_data; } - max14577->vendor_id = ((reg_data & DEVID_VENDORID_MASK) >> - DEVID_VENDORID_SHIFT); - max14577->device_id = ((reg_data & DEVID_DEVICEID_MASK) >> - DEVID_DEVICEID_SHIFT); - dev_info(max14577->dev, "Device ID: 0x%x, vendor: 0x%x\n", - max14577->device_id, max14577->vendor_id); + + max14577_print_dev_type(max14577); ret = regmap_add_irq_chip(max14577->regmap, max14577->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0, @@ -173,7 +201,7 @@ static int max14577_i2c_remove(struct i2c_client *i2c) } static const struct i2c_device_id max14577_i2c_id[] = { - { "max14577", 0 }, + { "max14577", MAXIM_DEVICE_TYPE_MAX14577, }, { } }; MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); @@ -216,11 +244,6 @@ static int max14577_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static struct of_device_id max14577_dt_match[] = { - { .compatible = "maxim,max14577", }, - {}, -}; - static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume); static struct i2c_driver max14577_i2c_driver = { @@ -237,6 +260,9 @@ static struct i2c_driver max14577_i2c_driver = { static int __init max14577_i2c_init(void) { + BUILD_BUG_ON(ARRAY_SIZE(max14577_i2c_id) != MAXIM_DEVICE_TYPE_NUM); + BUILD_BUG_ON(ARRAY_SIZE(max14577_dt_match) != MAXIM_DEVICE_TYPE_NUM); + return 
i2c_add_driver(&max14577_i2c_driver); } subsys_initcall(max14577_i2c_init); diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index 97b78d94f92f..1ce6f2952cc9 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -22,6 +22,13 @@ #include #include +enum maxim_device_type { + MAXIM_DEVICE_TYPE_UNKNOWN = 0, + MAXIM_DEVICE_TYPE_MAX14577, + + MAXIM_DEVICE_TYPE_NUM, +}; + /* Slave addr = 0x4A: MUIC and Charger */ enum max14577_reg { MAX14577_REG_DEVICEID = 0x00, @@ -271,15 +278,12 @@ enum max14577_irq { struct max14577 { struct device *dev; struct i2c_client *i2c; /* Slave addr = 0x4A */ + enum maxim_device_type dev_type; struct regmap *regmap; struct regmap_irq_chip_data *irq_data; int irq; - - /* Device ID */ - u8 vendor_id; /* Vendor Identification */ - u8 device_id; /* Chip Version */ }; /* MAX14577 shared regmap API function */ -- cgit v1.2.3 From c7846852ec8f304c629963202fa565452e8fe34c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:17 +0200 Subject: mfd: max14577: Add MAX14577 prefix to IRQ defines This patch prepares for adding support for MAX77836 device to existing max14577 driver by adding MAX14577 prefix to defines of interrupts. This is only a rename-like patch, new code is not added. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- drivers/mfd/max14577.c | 24 ++++++++++++------------ include/linux/mfd/max14577-private.h | 28 ++++++++++++++-------------- 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 0e07ed74ab41..6f39dec9dfdf 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -66,20 +66,20 @@ static const struct regmap_config max14577_muic_regmap_config = { static const struct regmap_irq max14577_irqs[] = { /* INT1 interrupts */ - { .reg_offset = 0, .mask = INT1_ADC_MASK, }, - { .reg_offset = 0, .mask = INT1_ADCLOW_MASK, }, - { .reg_offset = 0, .mask = INT1_ADCERR_MASK, }, + { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, + { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, }, + { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, }, /* INT2 interrupts */ - { .reg_offset = 1, .mask = INT2_CHGTYP_MASK, }, - { .reg_offset = 1, .mask = INT2_CHGDETRUN_MASK, }, - { .reg_offset = 1, .mask = INT2_DCDTMR_MASK, }, - { .reg_offset = 1, .mask = INT2_DBCHG_MASK, }, - { .reg_offset = 1, .mask = INT2_VBVOLT_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, }, /* INT3 interrupts */ - { .reg_offset = 2, .mask = INT3_EOC_MASK, }, - { .reg_offset = 2, .mask = INT3_CGMBC_MASK, }, - { .reg_offset = 2, .mask = INT3_OVP_MASK, }, - { .reg_offset = 2, .mask = INT3_MBCCHGERR_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, }, }; static const struct regmap_irq_chip max14577_irq_chip = { diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index 1ce6f2952cc9..989183d232cd 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -79,20 +79,20 @@ enum 
max14577_muic_charger_type { }; /* MAX14577 interrupts */ -#define INT1_ADC_MASK (0x1 << 0) -#define INT1_ADCLOW_MASK (0x1 << 1) -#define INT1_ADCERR_MASK (0x1 << 2) - -#define INT2_CHGTYP_MASK (0x1 << 0) -#define INT2_CHGDETRUN_MASK (0x1 << 1) -#define INT2_DCDTMR_MASK (0x1 << 2) -#define INT2_DBCHG_MASK (0x1 << 3) -#define INT2_VBVOLT_MASK (0x1 << 4) - -#define INT3_EOC_MASK (0x1 << 0) -#define INT3_CGMBC_MASK (0x1 << 1) -#define INT3_OVP_MASK (0x1 << 2) -#define INT3_MBCCHGERR_MASK (0x1 << 3) +#define MAX14577_INT1_ADC_MASK BIT(0) +#define MAX14577_INT1_ADCLOW_MASK BIT(1) +#define MAX14577_INT1_ADCERR_MASK BIT(2) + +#define MAX14577_INT2_CHGTYP_MASK BIT(0) +#define MAX14577_INT2_CHGDETRUN_MASK BIT(1) +#define MAX14577_INT2_DCDTMR_MASK BIT(2) +#define MAX14577_INT2_DBCHG_MASK BIT(3) +#define MAX14577_INT2_VBVOLT_MASK BIT(4) + +#define MAX14577_INT3_EOC_MASK BIT(0) +#define MAX14577_INT3_CGMBC_MASK BIT(1) +#define MAX14577_INT3_OVP_MASK BIT(2) +#define MAX14577_INT3_MBCCHGERR_MASK BIT(3) /* MAX14577 DEVICE ID register */ #define DEVID_VENDORID_SHIFT 0 -- cgit v1.2.3 From aee2a57c7482c712052b877218aa2c5bc0fe8626 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:18 +0200 Subject: mfd: max77836: Add MAX77836 support to max14577 driver Add Maxim 77836 support to max14577 driver. The chipsets have same MUIC component so the extcon, charger and regulators are almost the same. The MAX77836 however has also PMIC and Fuel Gauge. The MAX77836 uses three I2C slave addresses and has additional interrupts (related to PMIC and Fuel Gauge). It has also Interrupt Source register, just like MAX77686 and MAX77693. The MAX77836 PMIC's TOPSYS and INTSRC interrupts are reported in the PMIC block. The PMIC block has different I2C slave address and uses own regmap so another regmap_irq_chip is needed. Since we have two regmap_irq_chip, use shared interrupts on MAX77836. This patch adds additional defines and functions to the max14577 MFD core driver so the driver will handle both chipsets. Also this patch replaces "0x1 << N" with BIT(N) in defines for register masks. Signed-off-by: Krzysztof Kozlowski Acked-by: Chanwoo Choi Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 6 +- drivers/mfd/max14577.c | 217 +++++++++++++++++++++++++++++++++-- include/linux/mfd/max14577-private.h | 145 +++++++++++++++++------ include/linux/mfd/max14577.h | 7 +- 4 files changed, 330 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 33834120d057..5bdefe72625e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -331,15 +331,15 @@ config MFD_88PM860X battery-charger under the corresponding menus. config MFD_MAX14577 - bool "Maxim Semiconductor MAX14577 MUIC + Charger Support" + bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support" depends on I2C=y select MFD_CORE select REGMAP_I2C select REGMAP_IRQ select IRQ_DOMAIN help - Say yes here to add support for Maxim Semiconductor MAX14577. - This is a Micro-USB IC with Charger controls on chip. + Say yes here to add support for Maxim Semiconductor MAX14577 and + MAX77836 Micro-USB ICs with battery charger. This driver provides common support for accessing the device; additional drivers must be enabled in order to use the functionality of the device. 
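For reference, the BIT() conversion mentioned in the commit message above is purely mechanical: BIT() is the generic kernel helper (defined in <linux/bitops.h> at the time), so every converted mask keeps its old value. A minimal illustration, not taken from the patch:

	#define BIT(nr)				(1UL << (nr))
	#define MAX14577_INT2_VBVOLT_MASK	BIT(4)	/* same value as (0x1 << 4) */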
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 6f39dec9dfdf..20e3b2d81bf0 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -1,7 +1,7 @@ /* - * max14577.c - mfd core driver for the Maxim 14577 + * max14577.c - mfd core driver for the Maxim 14577/77836 * - * Copyright (C) 2013 Samsung Electrnoics + * Copyright (C) 2014 Samsung Electrnoics * Chanwoo Choi * Krzysztof Kozlowski * @@ -38,11 +38,34 @@ static struct mfd_cell max14577_devs[] = { { .name = "max14577-charger", }, }; +static struct mfd_cell max77836_devs[] = { + { + .name = "max77836-muic", + .of_compatible = "maxim,max77836-muic", + }, + { + .name = "max77836-regulator", + .of_compatible = "maxim,max77836-regulator", + }, + { + .name = "max77836-charger", + .of_compatible = "maxim,max77836-charger", + }, + { + .name = "max77836-battery", + .of_compatible = "maxim,max77836-battery", + }, +}; + static struct of_device_id max14577_dt_match[] = { { .compatible = "maxim,max14577", .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, }, + { + .compatible = "maxim,max77836", + .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, + }, {}, }; @@ -57,6 +80,26 @@ static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg) return false; } +static bool max77836_muic_volatile_reg(struct device *dev, unsigned int reg) +{ + /* Any max14577 volatile registers are also max77836 volatile. */ + if (max14577_muic_volatile_reg(dev, reg)) + return true; + + switch (reg) { + case MAX77836_FG_REG_VCELL_MSB ... MAX77836_FG_REG_SOC_LSB: + case MAX77836_FG_REG_CRATE_MSB ... MAX77836_FG_REG_CRATE_LSB: + case MAX77836_FG_REG_STATUS_H ... MAX77836_FG_REG_STATUS_L: + case MAX77836_PMIC_REG_INTSRC: + case MAX77836_PMIC_REG_TOPSYS_INT: + case MAX77836_PMIC_REG_TOPSYS_STAT: + return true; + default: + break; + } + return false; +} + static const struct regmap_config max14577_muic_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -64,6 +107,13 @@ static const struct regmap_config max14577_muic_regmap_config = { .max_register = MAX14577_REG_END, }; +static const struct regmap_config max77836_pmic_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_reg = max77836_muic_volatile_reg, + .max_register = MAX77836_PMIC_REG_END, +}; + static const struct regmap_irq max14577_irqs[] = { /* INT1 interrupts */ { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, @@ -86,12 +136,56 @@ static const struct regmap_irq_chip max14577_irq_chip = { .name = "max14577", .status_base = MAX14577_REG_INT1, .mask_base = MAX14577_REG_INTMASK1, - .mask_invert = 1, + .mask_invert = true, .num_regs = 3, .irqs = max14577_irqs, .num_irqs = ARRAY_SIZE(max14577_irqs), }; +static const struct regmap_irq max77836_muic_irqs[] = { + /* INT1 interrupts */ + { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, + { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, }, + { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, }, + /* INT2 interrupts */ + { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, }, + { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, }, + { .reg_offset = 1, .mask = MAX77836_INT2_VIDRM_MASK, }, + /* INT3 interrupts */ + { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, }, + { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, }, +}; + 
+static const struct regmap_irq_chip max77836_muic_irq_chip = { + .name = "max77836-muic", + .status_base = MAX14577_REG_INT1, + .mask_base = MAX14577_REG_INTMASK1, + .mask_invert = true, + .num_regs = 3, + .irqs = max77836_muic_irqs, + .num_irqs = ARRAY_SIZE(max77836_muic_irqs), +}; + +static const struct regmap_irq max77836_pmic_irqs[] = { + { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T120C_MASK, }, + { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T140C_MASK, }, +}; + +static const struct regmap_irq_chip max77836_pmic_irq_chip = { + .name = "max77836-pmic", + .status_base = MAX77836_PMIC_REG_TOPSYS_INT, + .mask_base = MAX77836_PMIC_REG_TOPSYS_INT_MASK, + .mask_invert = false, + .num_regs = 1, + .irqs = max77836_pmic_irqs, + .num_irqs = ARRAY_SIZE(max77836_pmic_irqs), +}; + static void max14577_print_dev_type(struct max14577 *max14577) { u8 reg_data, vendor_id, device_id; @@ -114,6 +208,81 @@ static void max14577_print_dev_type(struct max14577 *max14577) max14577->dev_type, device_id, vendor_id); } +/* + * Max77836 specific initialization code for driver probe. + * Adds new I2C dummy device, regmap and regmap IRQ chip. + * Unmasks Interrupt Source register. + * + * On success returns 0. + * On failure returns errno and reverts any changes done so far (e.g. remove + * I2C dummy device), except masking the INT SRC register. + */ +static int max77836_init(struct max14577 *max14577) +{ + int ret; + u8 intsrc_mask; + + max14577->i2c_pmic = i2c_new_dummy(max14577->i2c->adapter, + I2C_ADDR_PMIC); + if (!max14577->i2c_pmic) { + dev_err(max14577->dev, "Failed to register PMIC I2C device\n"); + return -ENODEV; + } + i2c_set_clientdata(max14577->i2c_pmic, max14577); + + max14577->regmap_pmic = devm_regmap_init_i2c(max14577->i2c_pmic, + &max77836_pmic_regmap_config); + if (IS_ERR(max14577->regmap_pmic)) { + ret = PTR_ERR(max14577->regmap_pmic); + dev_err(max14577->dev, "Failed to allocate PMIC register map: %d\n", + ret); + goto err; + } + + /* Un-mask MAX77836 Interrupt Source register */ + ret = max14577_read_reg(max14577->regmap_pmic, + MAX77836_PMIC_REG_INTSRC_MASK, &intsrc_mask); + if (ret < 0) { + dev_err(max14577->dev, "Failed to read PMIC register\n"); + goto err; + } + + intsrc_mask &= ~(MAX77836_INTSRC_MASK_TOP_INT_MASK); + intsrc_mask &= ~(MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK); + ret = max14577_write_reg(max14577->regmap_pmic, + MAX77836_PMIC_REG_INTSRC_MASK, intsrc_mask); + if (ret < 0) { + dev_err(max14577->dev, "Failed to write PMIC register\n"); + goto err; + } + + ret = regmap_add_irq_chip(max14577->regmap_pmic, max14577->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED, + 0, &max77836_pmic_irq_chip, + &max14577->irq_data_pmic); + if (ret != 0) { + dev_err(max14577->dev, "Failed to request PMIC IRQ %d: %d\n", + max14577->irq, ret); + goto err; + } + + return 0; + +err: + i2c_unregister_device(max14577->i2c_pmic); + + return ret; +} + +/* + * Max77836 specific de-initialization code for driver remove. 
+ */ +static void max77836_remove(struct max14577 *max14577) +{ + regmap_del_irq_chip(max14577->irq, max14577->irq_data_pmic); + i2c_unregister_device(max14577->i2c_pmic); +} + static int max14577_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -121,6 +290,10 @@ static int max14577_i2c_probe(struct i2c_client *i2c, struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev); struct device_node *np = i2c->dev.of_node; int ret = 0; + const struct regmap_irq_chip *irq_chip; + struct mfd_cell *mfd_devs; + unsigned int mfd_devs_size; + int irq_flags; if (np) { pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL); @@ -164,9 +337,24 @@ static int max14577_i2c_probe(struct i2c_client *i2c, max14577_print_dev_type(max14577); + switch (max14577->dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + irq_chip = &max77836_muic_irq_chip; + mfd_devs = max77836_devs; + mfd_devs_size = ARRAY_SIZE(max77836_devs); + irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED; + break; + case MAXIM_DEVICE_TYPE_MAX14577: + default: + irq_chip = &max14577_irq_chip; + mfd_devs = max14577_devs; + mfd_devs_size = ARRAY_SIZE(max14577_devs); + irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; + break; + } + ret = regmap_add_irq_chip(max14577->regmap, max14577->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0, - &max14577_irq_chip, + irq_flags, 0, irq_chip, &max14577->irq_data); if (ret != 0) { dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", @@ -174,8 +362,15 @@ static int max14577_i2c_probe(struct i2c_client *i2c, return ret; } - ret = mfd_add_devices(max14577->dev, -1, max14577_devs, - ARRAY_SIZE(max14577_devs), NULL, 0, + /* Max77836 specific initialization code (additional regmap) */ + if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) { + ret = max77836_init(max14577); + if (ret < 0) + goto err_max77836; + } + + ret = mfd_add_devices(max14577->dev, -1, mfd_devs, + mfd_devs_size, NULL, 0, regmap_irq_get_domain(max14577->irq_data)); if (ret < 0) goto err_mfd; @@ -185,6 +380,9 @@ static int max14577_i2c_probe(struct i2c_client *i2c, return 0; err_mfd: + if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) + max77836_remove(max14577); +err_max77836: regmap_del_irq_chip(max14577->irq, max14577->irq_data); return ret; @@ -196,12 +394,15 @@ static int max14577_i2c_remove(struct i2c_client *i2c) mfd_remove_devices(max14577->dev); regmap_del_irq_chip(max14577->irq, max14577->irq_data); + if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) + max77836_remove(max14577); return 0; } static const struct i2c_device_id max14577_i2c_id[] = { { "max14577", MAXIM_DEVICE_TYPE_MAX14577, }, + { "max77836", MAXIM_DEVICE_TYPE_MAX77836, }, { } }; MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); @@ -274,5 +475,5 @@ static void __exit max14577_i2c_exit(void) module_exit(max14577_i2c_exit); MODULE_AUTHOR("Chanwoo Choi , Krzysztof Kozlowski "); -MODULE_DESCRIPTION("MAXIM 14577 multi-function core driver"); +MODULE_DESCRIPTION("Maxim 14577/77836 multi-function core driver"); MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index 989183d232cd..e301bd19b067 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -1,7 +1,7 @@ /* - * max14577-private.h - Common API for the Maxim 14577 internal sub chip + * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip * - * Copyright (C) 2013 Samsung Electrnoics + * Copyright (C) 2014 Samsung Electrnoics * Chanwoo Choi * Krzysztof Kozlowski * @@ 
-22,9 +22,14 @@ #include #include +#define I2C_ADDR_PMIC (0x46 >> 1) +#define I2C_ADDR_MUIC (0x4A >> 1) +#define I2C_ADDR_FG (0x6C >> 1) + enum maxim_device_type { MAXIM_DEVICE_TYPE_UNKNOWN = 0, MAXIM_DEVICE_TYPE_MAX14577, + MAXIM_DEVICE_TYPE_MAX77836, MAXIM_DEVICE_TYPE_NUM, }; @@ -88,6 +93,7 @@ enum max14577_muic_charger_type { #define MAX14577_INT2_DCDTMR_MASK BIT(2) #define MAX14577_INT2_DBCHG_MASK BIT(3) #define MAX14577_INT2_VBVOLT_MASK BIT(4) +#define MAX77836_INT2_VIDRM_MASK BIT(5) #define MAX14577_INT3_EOC_MASK BIT(0) #define MAX14577_INT3_CGMBC_MASK BIT(1) @@ -104,9 +110,11 @@ enum max14577_muic_charger_type { #define STATUS1_ADC_SHIFT 0 #define STATUS1_ADCLOW_SHIFT 5 #define STATUS1_ADCERR_SHIFT 6 +#define MAX77836_STATUS1_ADC1K_SHIFT 7 #define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) -#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) -#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT) +#define STATUS1_ADCLOW_MASK BIT(STATUS1_ADCLOW_SHIFT) +#define STATUS1_ADCERR_MASK BIT(STATUS1_ADCERR_SHIFT) +#define MAX77836_STATUS1_ADC1K_MASK BIT(MAX77836_STATUS1_ADC1K_SHIFT) /* MAX14577 STATUS2 register */ #define STATUS2_CHGTYP_SHIFT 0 @@ -114,11 +122,13 @@ enum max14577_muic_charger_type { #define STATUS2_DCDTMR_SHIFT 4 #define STATUS2_DBCHG_SHIFT 5 #define STATUS2_VBVOLT_SHIFT 6 +#define MAX77836_STATUS2_VIDRM_SHIFT 7 #define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) -#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT) -#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT) -#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT) -#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT) +#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) +#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) +#define STATUS2_DBCHG_MASK BIT(STATUS2_DBCHG_SHIFT) +#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) +#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT) /* MAX14577 CONTROL1 register */ #define COMN1SW_SHIFT 0 @@ -127,8 +137,8 @@ enum max14577_muic_charger_type { #define IDBEN_SHIFT 7 #define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) #define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) -#define MICEN_MASK (0x1 << MICEN_SHIFT) -#define IDBEN_MASK (0x1 << IDBEN_SHIFT) +#define MICEN_MASK BIT(MICEN_SHIFT) +#define IDBEN_MASK BIT(IDBEN_SHIFT) #define CLEAR_IDBEN_MICEN_MASK (COMN1SW_MASK | COMP2SW_MASK) #define CTRL1_SW_USB ((1 << COMP2SW_SHIFT) \ | (1 << COMN1SW_SHIFT)) @@ -148,14 +158,14 @@ enum max14577_muic_charger_type { #define CTRL2_ACCDET_SHIFT (5) #define CTRL2_USBCPINT_SHIFT (6) #define CTRL2_RCPS_SHIFT (7) -#define CTRL2_LOWPWR_MASK (0x1 << CTRL2_LOWPWR_SHIFT) -#define CTRL2_ADCEN_MASK (0x1 << CTRL2_ADCEN_SHIFT) -#define CTRL2_CPEN_MASK (0x1 << CTRL2_CPEN_SHIFT) -#define CTRL2_SFOUTASRT_MASK (0x1 << CTRL2_SFOUTASRT_SHIFT) -#define CTRL2_SFOUTORD_MASK (0x1 << CTRL2_SFOUTORD_SHIFT) -#define CTRL2_ACCDET_MASK (0x1 << CTRL2_ACCDET_SHIFT) -#define CTRL2_USBCPINT_MASK (0x1 << CTRL2_USBCPINT_SHIFT) -#define CTRL2_RCPS_MASK (0x1 << CTR2_RCPS_SHIFT) +#define CTRL2_LOWPWR_MASK BIT(CTRL2_LOWPWR_SHIFT) +#define CTRL2_ADCEN_MASK BIT(CTRL2_ADCEN_SHIFT) +#define CTRL2_CPEN_MASK BIT(CTRL2_CPEN_SHIFT) +#define CTRL2_SFOUTASRT_MASK BIT(CTRL2_SFOUTASRT_SHIFT) +#define CTRL2_SFOUTORD_MASK BIT(CTRL2_SFOUTORD_SHIFT) +#define CTRL2_ACCDET_MASK BIT(CTRL2_ACCDET_SHIFT) +#define CTRL2_USBCPINT_MASK BIT(CTRL2_USBCPINT_SHIFT) +#define CTRL2_RCPS_MASK BIT(CTRL2_RCPS_SHIFT) #define CTRL2_CPEN1_LOWPWR0 ((1 << CTRL2_CPEN_SHIFT) | \ (0 << CTRL2_LOWPWR_SHIFT)) @@ 
-203,14 +213,14 @@ enum max14577_charger_reg { #define CDETCTRL1_DBEXIT_SHIFT 5 #define CDETCTRL1_DBIDLE_SHIFT 6 #define CDETCTRL1_CDPDET_SHIFT 7 -#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT) -#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT) -#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT) -#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT) -#define CDETCTRL1_DCHKTM_MASK (0x1 << CDETCTRL1_DCHKTM_SHIFT) -#define CDETCTRL1_DBEXIT_MASK (0x1 << CDETCTRL1_DBEXIT_SHIFT) -#define CDETCTRL1_DBIDLE_MASK (0x1 << CDETCTRL1_DBIDLE_SHIFT) -#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT) +#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT) +#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT) +#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT) +#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT) +#define CDETCTRL1_DCHKTM_MASK BIT(CDETCTRL1_DCHKTM_SHIFT) +#define CDETCTRL1_DBEXIT_MASK BIT(CDETCTRL1_DBEXIT_SHIFT) +#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT) +#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT) /* MAX14577 CHGCTRL1 register */ #define CHGCTRL1_TCHW_SHIFT 4 @@ -218,9 +228,9 @@ enum max14577_charger_reg { /* MAX14577 CHGCTRL2 register */ #define CHGCTRL2_MBCHOSTEN_SHIFT 6 -#define CHGCTRL2_MBCHOSTEN_MASK (0x1 << CHGCTRL2_MBCHOSTEN_SHIFT) +#define CHGCTRL2_MBCHOSTEN_MASK BIT(CHGCTRL2_MBCHOSTEN_SHIFT) #define CHGCTRL2_VCHGR_RC_SHIFT 7 -#define CHGCTRL2_VCHGR_RC_MASK (0x1 << CHGCTRL2_VCHGR_RC_SHIFT) +#define CHGCTRL2_VCHGR_RC_MASK BIT(CHGCTRL2_VCHGR_RC_SHIFT) /* MAX14577 CHGCTRL3 register */ #define CHGCTRL3_MBCCVWRC_SHIFT 0 @@ -230,7 +240,7 @@ enum max14577_charger_reg { #define CHGCTRL4_MBCICHWRCH_SHIFT 0 #define CHGCTRL4_MBCICHWRCH_MASK (0xf << CHGCTRL4_MBCICHWRCH_SHIFT) #define CHGCTRL4_MBCICHWRCL_SHIFT 4 -#define CHGCTRL4_MBCICHWRCL_MASK (0x1 << CHGCTRL4_MBCICHWRCL_SHIFT) +#define CHGCTRL4_MBCICHWRCL_MASK BIT(CHGCTRL4_MBCICHWRCL_SHIFT) /* MAX14577 CHGCTRL5 register */ #define CHGCTRL5_EOCS_SHIFT 0 @@ -238,7 +248,7 @@ enum max14577_charger_reg { /* MAX14577 CHGCTRL6 register */ #define CHGCTRL6_AUTOSTOP_SHIFT 5 -#define CHGCTRL6_AUTOSTOP_MASK (0x1 << CHGCTRL6_AUTOSTOP_SHIFT) +#define CHGCTRL6_AUTOSTOP_MASK BIT(CHGCTRL6_AUTOSTOP_SHIFT) /* MAX14577 CHGCTRL7 register */ #define CHGCTRL7_OTPCGHCVS_SHIFT 0 @@ -253,6 +263,70 @@ enum max14577_charger_reg { /* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ #define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 +/* Slave addr = 0x46: PMIC */ +enum max77836_pmic_reg { + MAX77836_PMIC_REG_PMIC_ID = 0x20, + MAX77836_PMIC_REG_PMIC_REV = 0x21, + MAX77836_PMIC_REG_INTSRC = 0x22, + MAX77836_PMIC_REG_INTSRC_MASK = 0x23, + MAX77836_PMIC_REG_TOPSYS_INT = 0x24, + MAX77836_PMIC_REG_TOPSYS_INT_MASK = 0x26, + MAX77836_PMIC_REG_TOPSYS_STAT = 0x28, + MAX77836_PMIC_REG_MRSTB_CNTL = 0x2A, + MAX77836_PMIC_REG_LSCNFG = 0x2B, + + MAX77836_LDO_REG_CNFG1_LDO1 = 0x51, + MAX77836_LDO_REG_CNFG2_LDO1 = 0x52, + MAX77836_LDO_REG_CNFG1_LDO2 = 0x53, + MAX77836_LDO_REG_CNFG2_LDO2 = 0x54, + MAX77836_LDO_REG_CNFG_LDO_BIAS = 0x55, + + MAX77836_COMP_REG_COMP1 = 0x60, + + MAX77836_PMIC_REG_END, +}; + +#define MAX77836_INTSRC_MASK_TOP_INT_SHIFT 1 +#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT 3 +#define MAX77836_INTSRC_MASK_TOP_INT_MASK BIT(MAX77836_INTSRC_MASK_TOP_INT_SHIFT) +#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK BIT(MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT) + +/* MAX77836 PMIC interrupts */ +#define MAX77836_TOPSYS_INT_T120C_SHIFT 0 +#define 
MAX77836_TOPSYS_INT_T140C_SHIFT 1 +#define MAX77836_TOPSYS_INT_T120C_MASK BIT(MAX77836_TOPSYS_INT_T120C_SHIFT) +#define MAX77836_TOPSYS_INT_T140C_MASK BIT(MAX77836_TOPSYS_INT_T140C_SHIFT) + +/* Slave addr = 0x6C: Fuel-Gauge/Battery */ +enum max77836_fg_reg { + MAX77836_FG_REG_VCELL_MSB = 0x02, + MAX77836_FG_REG_VCELL_LSB = 0x03, + MAX77836_FG_REG_SOC_MSB = 0x04, + MAX77836_FG_REG_SOC_LSB = 0x05, + MAX77836_FG_REG_MODE_H = 0x06, + MAX77836_FG_REG_MODE_L = 0x07, + MAX77836_FG_REG_VERSION_MSB = 0x08, + MAX77836_FG_REG_VERSION_LSB = 0x09, + MAX77836_FG_REG_HIBRT_H = 0x0A, + MAX77836_FG_REG_HIBRT_L = 0x0B, + MAX77836_FG_REG_CONFIG_H = 0x0C, + MAX77836_FG_REG_CONFIG_L = 0x0D, + MAX77836_FG_REG_VALRT_MIN = 0x14, + MAX77836_FG_REG_VALRT_MAX = 0x15, + MAX77836_FG_REG_CRATE_MSB = 0x16, + MAX77836_FG_REG_CRATE_LSB = 0x17, + MAX77836_FG_REG_VRESET = 0x18, + MAX77836_FG_REG_FGID = 0x19, + MAX77836_FG_REG_STATUS_H = 0x1A, + MAX77836_FG_REG_STATUS_L = 0x1B, + /* + * TODO: TABLE registers + * TODO: CMD register + */ + + MAX77836_FG_REG_END, +}; + enum max14577_irq { /* INT1 */ MAX14577_IRQ_INT1_ADC, @@ -272,17 +346,24 @@ enum max14577_irq { MAX14577_IRQ_INT3_OVP, MAX14577_IRQ_INT3_MBCCHGERR, + /* TOPSYS_INT, only MAX77836 */ + MAX77836_IRQ_TOPSYS_T140C, + MAX77836_IRQ_TOPSYS_T120C, + MAX14577_IRQ_NUM, }; struct max14577 { struct device *dev; struct i2c_client *i2c; /* Slave addr = 0x4A */ + struct i2c_client *i2c_pmic; /* Slave addr = 0x46 */ enum maxim_device_type dev_type; - struct regmap *regmap; + struct regmap *regmap; /* For MUIC and Charger */ + struct regmap *regmap_pmic; - struct regmap_irq_chip_data *irq_data; + struct regmap_irq_chip_data *irq_data; /* For MUIC and Charger */ + struct regmap_irq_chip_data *irq_data_pmic; int irq; }; diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h index 736d39c3ec0d..08b449159fd1 100644 --- a/include/linux/mfd/max14577.h +++ b/include/linux/mfd/max14577.h @@ -1,7 +1,7 @@ /* - * max14577.h - Driver for the Maxim 14577 + * max14577.h - Driver for the Maxim 14577/77836 * - * Copyright (C) 2013 Samsung Electrnoics + * Copyright (C) 2014 Samsung Electrnoics * Chanwoo Choi * Krzysztof Kozlowski * @@ -20,6 +20,9 @@ * MAX14577 has MUIC, Charger devices. * The devices share the same I2C bus and interrupt line * included in this mfd driver. + * + * MAX77836 has additional PMIC and Fuel-Gauge on different I2C slave + * addresses. */ #ifndef __MAX14577_H__ -- cgit v1.2.3 From 4706a5253bcc502a5889feb98392ea7b15dd936e Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:19 +0200 Subject: extcon: max14577: Add support for MAX77836 Add support for MAX77836 chipset to the max14577 extcon driver. The MAX77836 MUIC has additional interrupts (VIDRM, ADC1K) so IRQ handling is split up into two functions: max14577_parse_irq() and max77836_parse_irq(). Signed-off-by: Krzysztof Kozlowski Acked-by: Chanwoo Choi Tested-by: Chanwoo Choi Signed-off-by: Lee Jones --- drivers/extcon/Kconfig | 4 +- drivers/extcon/extcon-max14577.c | 109 +++++++++++++++++++++++++++++------ drivers/mfd/max14577.c | 1 + include/linux/mfd/max14577-private.h | 3 + 4 files changed, 96 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index be56e8ac95e6..aebde489c291 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -28,13 +28,13 @@ config EXTCON_ADC_JACK Say Y here to enable extcon device driver based on ADC values. 
config EXTCON_MAX14577 - tristate "MAX14577 EXTCON Support" + tristate "MAX14577/77836 EXTCON Support" depends on MFD_MAX14577 select IRQ_DOMAIN select REGMAP_I2C help If you say yes here you get support for the MUIC device of - Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory + Maxim MAX14577/77836. The MAX14577/77836 MUIC is a USB port accessory detector and switch. config EXTCON_MAX77693 diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c index 1513013a92f1..c76734a70171 100644 --- a/drivers/extcon/extcon-max14577.c +++ b/drivers/extcon/extcon-max14577.c @@ -1,8 +1,9 @@ /* - * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC + * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC * - * Copyright (C) 2013 Samsung Electrnoics + * Copyright (C) 2013,2014 Samsung Electrnoics * Chanwoo Choi + * Krzysztof Kozlowski * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -62,6 +63,19 @@ static struct max14577_muic_irq max14577_muic_irqs[] = { { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, }; +static struct max14577_muic_irq max77836_muic_irqs[] = { + { MAX14577_IRQ_INT1_ADC, "muic-ADC" }, + { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" }, + { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" }, + { MAX77836_IRQ_INT1_ADC1K, "muic-ADC1K" }, + { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" }, + { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" }, + { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" }, + { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" }, + { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, + { MAX77836_IRQ_INT2_VIDRM, "muic-VIDRM" }, +}; + struct max14577_muic_info { struct device *dev; struct max14577 *max14577; @@ -529,21 +543,12 @@ static void max14577_muic_irq_work(struct work_struct *work) return; } -static irqreturn_t max14577_muic_irq_handler(int irq, void *data) +/* + * Sets irq_adc or irq_chg in max14577_muic_info and returns 1. + * Returns 0 if irq_type does not match registered IRQ for this device type. + */ +static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type) { - struct max14577_muic_info *info = data; - int i, irq_type = -1; - - /* - * We may be called multiple times for different nested IRQ-s. - * Including changes in INT1_ADC and INT2_CGHTYP at once. - * However we only need to know whether it was ADC, charger - * or both interrupts so decode IRQ and turn on proper flags. - */ - for (i = 0; i < info->muic_irqs_num; i++) - if (irq == info->muic_irqs[i].virq) - irq_type = info->muic_irqs[i].irq; - switch (irq_type) { case MAX14577_IRQ_INT1_ADC: case MAX14577_IRQ_INT1_ADCLOW: @@ -551,7 +556,7 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data) /* Handle all of accessory except for type of charger accessory */ info->irq_adc = true; - break; + return 1; case MAX14577_IRQ_INT2_CHGTYP: case MAX14577_IRQ_INT2_CHGDETRUN: case MAX14577_IRQ_INT2_DCDTMR: @@ -559,8 +564,62 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data) case MAX14577_IRQ_INT2_VBVOLT: /* Handle charger accessory */ info->irq_chg = true; + return 1; + default: + return 0; + } +} + +/* + * Sets irq_adc or irq_chg in max14577_muic_info and returns 1. + * Returns 0 if irq_type does not match registered IRQ for this device type. 
+ */ +static int max77836_parse_irq(struct max14577_muic_info *info, int irq_type) +{ + /* First check common max14577 interrupts */ + if (max14577_parse_irq(info, irq_type)) + return 1; + + switch (irq_type) { + case MAX77836_IRQ_INT1_ADC1K: + info->irq_adc = true; + return 1; + case MAX77836_IRQ_INT2_VIDRM: + /* Handle charger accessory */ + info->irq_chg = true; + return 1; + default: + return 0; + } +} + +static irqreturn_t max14577_muic_irq_handler(int irq, void *data) +{ + struct max14577_muic_info *info = data; + int i, irq_type = -1; + bool irq_parsed; + + /* + * We may be called multiple times for different nested IRQ-s. + * Including changes in INT1_ADC and INT2_CGHTYP at once. + * However we only need to know whether it was ADC, charger + * or both interrupts so decode IRQ and turn on proper flags. + */ + for (i = 0; i < info->muic_irqs_num; i++) + if (irq == info->muic_irqs[i].virq) + irq_type = info->muic_irqs[i].irq; + + switch (info->max14577->dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + irq_parsed = max77836_parse_irq(info, irq_type); break; + case MAXIM_DEVICE_TYPE_MAX14577: default: + irq_parsed = max14577_parse_irq(info, irq_type); + break; + } + + if (!irq_parsed) { dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n", irq_type); return IRQ_HANDLED; @@ -646,6 +705,10 @@ static int max14577_muic_probe(struct platform_device *pdev) INIT_WORK(&info->irq_work, max14577_muic_irq_work); switch (max14577->dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + info->muic_irqs = max77836_muic_irqs; + info->muic_irqs_num = ARRAY_SIZE(max77836_muic_irqs); + break; case MAXIM_DEVICE_TYPE_MAX14577: default: info->muic_irqs = max14577_muic_irqs; @@ -744,6 +807,13 @@ static int max14577_muic_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id max14577_muic_id[] = { + { "max14577-muic", MAXIM_DEVICE_TYPE_MAX14577, }, + { "max77836-muic", MAXIM_DEVICE_TYPE_MAX77836, }, + { } +}; +MODULE_DEVICE_TABLE(platform, max14577_muic_id); + static struct platform_driver max14577_muic_driver = { .driver = { .name = "max14577-muic", @@ -751,11 +821,12 @@ static struct platform_driver max14577_muic_driver = { }, .probe = max14577_muic_probe, .remove = max14577_muic_remove, + .id_table = max14577_muic_id, }; module_platform_driver(max14577_muic_driver); -MODULE_DESCRIPTION("MAXIM 14577 Extcon driver"); -MODULE_AUTHOR("Chanwoo Choi "); +MODULE_DESCRIPTION("Maxim 14577/77836 Extcon driver"); +MODULE_AUTHOR("Chanwoo Choi , Krzysztof Kozlowski "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:extcon-max14577"); diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 20e3b2d81bf0..484d372a4892 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -147,6 +147,7 @@ static const struct regmap_irq max77836_muic_irqs[] = { { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, }, { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, }, + { .reg_offset = 0, .mask = MAX77836_INT1_ADC1K_MASK, }, /* INT2 interrupts */ { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, }, { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, }, diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index e301bd19b067..a557ae27d8a8 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -87,6 +87,7 @@ enum max14577_muic_charger_type { #define MAX14577_INT1_ADC_MASK BIT(0) #define MAX14577_INT1_ADCLOW_MASK BIT(1) #define 
MAX14577_INT1_ADCERR_MASK BIT(2) +#define MAX77836_INT1_ADC1K_MASK BIT(3) #define MAX14577_INT2_CHGTYP_MASK BIT(0) #define MAX14577_INT2_CHGDETRUN_MASK BIT(1) @@ -332,6 +333,7 @@ enum max14577_irq { MAX14577_IRQ_INT1_ADC, MAX14577_IRQ_INT1_ADCLOW, MAX14577_IRQ_INT1_ADCERR, + MAX77836_IRQ_INT1_ADC1K, /* INT2 */ MAX14577_IRQ_INT2_CHGTYP, @@ -339,6 +341,7 @@ enum max14577_irq { MAX14577_IRQ_INT2_DCDTMR, MAX14577_IRQ_INT2_DBCHG, MAX14577_IRQ_INT2_VBVOLT, + MAX77836_IRQ_INT2_VIDRM, /* INT3 */ MAX14577_IRQ_INT3_EOC, -- cgit v1.2.3 From 8a82b408acad29161c43072727151d373e68116a Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 11:17:20 +0200 Subject: regulator: max14577: Add support for MAX77836 regulators Add support for MAX77836 chipset and its additional two LDO regulators. These LDO regulators are controlled by the PMIC block with additional regmap (different I2C slave address). The MAX77836 charger and safeout regulators are almost identical to MAX14577. The registers layout is the same, except values for charger's current. The patch adds simple mapping between device type and supported current by the charger regulator. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Mark Brown Signed-off-by: Lee Jones --- drivers/regulator/Kconfig | 7 +- drivers/regulator/max14577.c | 277 ++++++++++++++++++++++++++++++----- include/linux/mfd/max14577-private.h | 32 ++++ include/linux/mfd/max14577.h | 12 +- 4 files changed, 289 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 903eb37f047a..f0cc9e6dac3a 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -266,11 +266,12 @@ config REGULATOR_LP8788 This driver supports LP8788 voltage regulator chip. config REGULATOR_MAX14577 - tristate "Maxim 14577 regulator" + tristate "Maxim 14577/77836 regulator" depends on MFD_MAX14577 help - This driver controls a Maxim 14577 regulator via I2C bus. - The regulators include safeout LDO and current regulator 'CHARGER'. + This driver controls a Maxim MAX14577/77836 regulator via I2C bus. + The MAX14577 regulators include safeout LDO and charger current + regulator. The MAX77836 has two additional LDOs. config REGULATOR_MAX1586 tristate "Maxim 1586/1587 voltage regulator" diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index ed60baaeceec..5d9c605cf534 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c @@ -1,5 +1,5 @@ /* - * max14577.c - Regulator driver for the Maxim 14577 + * max14577.c - Regulator driver for the Maxim 14577/77836 * * Copyright (C) 2013,2014 Samsung Electronics * Krzysztof Kozlowski @@ -22,6 +22,42 @@ #include #include +/* + * Valid limits of current for max14577 and max77836 chargers. + * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4 + * register for given chipset. 
+ */ +struct maxim_charger_current { + /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */ + unsigned int min; + /* + * Minimal current when high setting is active, + * set in CHGCTRL4/MBCICHWRCH, uA + */ + unsigned int high_start; + /* Value of one step in high setting, uA */ + unsigned int high_step; + /* Maximum current of high setting, uA */ + unsigned int max; +}; + +/* Table of valid charger currents for different Maxim chipsets */ +static const struct maxim_charger_current maxim_charger_currents[] = { + [MAXIM_DEVICE_TYPE_UNKNOWN] = { 0, 0, 0, 0 }, + [MAXIM_DEVICE_TYPE_MAX14577] = { + .min = MAX14577_REGULATOR_CURRENT_LIMIT_MIN, + .high_start = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START, + .high_step = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP, + .max = MAX14577_REGULATOR_CURRENT_LIMIT_MAX, + }, + [MAXIM_DEVICE_TYPE_MAX77836] = { + .min = MAX77836_REGULATOR_CURRENT_LIMIT_MIN, + .high_start = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START, + .high_step = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP, + .max = MAX77836_REGULATOR_CURRENT_LIMIT_MAX, + }, +}; + static int max14577_reg_is_enabled(struct regulator_dev *rdev) { int rid = rdev_get_id(rdev); @@ -47,6 +83,9 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev) { u8 reg_data; struct regmap *rmap = rdev->regmap; + struct max14577 *max14577 = rdev_get_drvdata(rdev); + const struct maxim_charger_current *limits = + &maxim_charger_currents[max14577->dev_type]; if (rdev_get_id(rdev) != MAX14577_CHARGER) return -EINVAL; @@ -54,12 +93,11 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev) max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0) - return MAX14577_REGULATOR_CURRENT_LIMIT_MIN; + return limits->min; reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >> CHGCTRL4_MBCICHWRCH_SHIFT); - return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + - reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP; + return limits->high_start + reg_data * limits->high_step; } static int max14577_reg_set_current_limit(struct regulator_dev *rdev, @@ -67,33 +105,39 @@ static int max14577_reg_set_current_limit(struct regulator_dev *rdev, { int i, current_bits = 0xf; u8 reg_data; + struct max14577 *max14577 = rdev_get_drvdata(rdev); + const struct maxim_charger_current *limits = + &maxim_charger_currents[max14577->dev_type]; if (rdev_get_id(rdev) != MAX14577_CHARGER) return -EINVAL; - if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX || - max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN) + if (min_uA > limits->max || max_uA < limits->min) return -EINVAL; - if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) { - /* Less than 200 mA, so set 90mA (turn only Low Bit off) */ + if (max_uA < limits->high_start) { + /* + * Less than high_start, + * so set the minimal current (turn only Low Bit off) + */ u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT; return max14577_update_reg(rdev->regmap, MAX14577_CHG_REG_CHG_CTRL4, CHGCTRL4_MBCICHWRCL_MASK, reg_data); } - /* max_uA is in range: , so search for - * valid current starting from LIMIT_MAX. */ - for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX; - i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START; - i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) { + /* + * max_uA is in range: , so search for + * valid current starting from maximum current. 
+ */ + for (i = limits->max; i >= limits->high_start; i -= limits->high_step) { if (i <= max_uA) break; current_bits--; } BUG_ON(current_bits < 0); /* Cannot happen */ - /* Turn Low Bit on (use range 200mA-950 mA) */ + + /* Turn Low Bit on (use range high_start-max)... */ reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT; /* and set proper High Bits */ reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT; @@ -118,7 +162,7 @@ static struct regulator_ops max14577_charger_ops = { .set_current_limit = max14577_reg_set_current_limit, }; -static const struct regulator_desc supported_regulators[] = { +static const struct regulator_desc max14577_supported_regulators[] = { [MAX14577_SAFEOUT] = { .name = "SAFEOUT", .id = MAX14577_SAFEOUT, @@ -141,16 +185,88 @@ static const struct regulator_desc supported_regulators[] = { }, }; +static struct regulator_ops max77836_ldo_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + /* TODO: add .set_suspend_mode */ +}; + +static const struct regulator_desc max77836_supported_regulators[] = { + [MAX14577_SAFEOUT] = { + .name = "SAFEOUT", + .id = MAX14577_SAFEOUT, + .ops = &max14577_safeout_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = 1, + .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE, + .enable_reg = MAX14577_REG_CONTROL2, + .enable_mask = CTRL2_SFOUTORD_MASK, + }, + [MAX14577_CHARGER] = { + .name = "CHARGER", + .id = MAX14577_CHARGER, + .ops = &max14577_charger_ops, + .type = REGULATOR_CURRENT, + .owner = THIS_MODULE, + .enable_reg = MAX14577_CHG_REG_CHG_CTRL2, + .enable_mask = CHGCTRL2_MBCHOSTEN_MASK, + }, + [MAX77836_LDO1] = { + .name = "LDO1", + .id = MAX77836_LDO1, + .ops = &max77836_ldo_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM, + .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN, + .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP, + .enable_reg = MAX77836_LDO_REG_CNFG1_LDO1, + .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK, + .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO1, + .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK, + }, + [MAX77836_LDO2] = { + .name = "LDO2", + .id = MAX77836_LDO2, + .ops = &max77836_ldo_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM, + .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN, + .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP, + .enable_reg = MAX77836_LDO_REG_CNFG1_LDO2, + .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK, + .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO2, + .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK, + }, +}; + #ifdef CONFIG_OF static struct of_regulator_match max14577_regulator_matches[] = { { .name = "SAFEOUT", }, { .name = "CHARGER", }, }; -static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) +static struct of_regulator_match max77836_regulator_matches[] = { + { .name = "SAFEOUT", }, + { .name = "CHARGER", }, + { .name = "LDO1", }, + { .name = "LDO2", }, +}; + +static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, + enum maxim_device_type dev_type) { int ret; struct device_node *np; + struct of_regulator_match *regulator_matches; + unsigned int regulator_matches_size; np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); if (!np) { @@ -158,8 +274,19 
@@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) return -EINVAL; } - ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, - MAX14577_REG_MAX); + switch (dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + regulator_matches = max77836_regulator_matches; + regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches); + break; + case MAXIM_DEVICE_TYPE_MAX14577: + default: + regulator_matches = max14577_regulator_matches; + regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches); + } + + ret = of_regulator_match(&pdev->dev, np, regulator_matches, + regulator_matches_size); if (ret < 0) dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); else @@ -170,31 +297,74 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) return ret; } -static inline struct regulator_init_data *match_init_data(int index) +static inline struct regulator_init_data *match_init_data(int index, + enum maxim_device_type dev_type) { - return max14577_regulator_matches[index].init_data; + switch (dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + return max77836_regulator_matches[index].init_data; + + case MAXIM_DEVICE_TYPE_MAX14577: + default: + return max14577_regulator_matches[index].init_data; + } } -static inline struct device_node *match_of_node(int index) +static inline struct device_node *match_of_node(int index, + enum maxim_device_type dev_type) { - return max14577_regulator_matches[index].of_node; + switch (dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + return max77836_regulator_matches[index].of_node; + + case MAXIM_DEVICE_TYPE_MAX14577: + default: + return max14577_regulator_matches[index].of_node; + } } #else /* CONFIG_OF */ -static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) +static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, + enum maxim_device_type dev_type) { return 0; } -static inline struct regulator_init_data *match_init_data(int index) +static inline struct regulator_init_data *match_init_data(int index, + enum maxim_device_type dev_type) { return NULL; } -static inline struct device_node *match_of_node(int index) +static inline struct device_node *match_of_node(int index, + enum maxim_device_type dev_type) { return NULL; } #endif /* CONFIG_OF */ +/** + * Registers for regulators of max77836 use different I2C slave addresses so + * different regmaps must be used for them. + * + * Returns proper regmap for accessing regulator passed by id. + */ +static struct regmap *max14577_get_regmap(struct max14577 *max14577, + int reg_id) +{ + switch (max14577->dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + switch (reg_id) { + case MAX77836_SAFEOUT ... MAX77836_CHARGER: + return max14577->regmap; + default: + /* MAX77836_LDO1 ... 
MAX77836_LDO2 */ + return max14577->regmap_pmic; + } + + case MAXIM_DEVICE_TYPE_MAX14577: + default: + return max14577->regmap; + } +} static int max14577_regulator_probe(struct platform_device *pdev) { @@ -202,15 +372,29 @@ static int max14577_regulator_probe(struct platform_device *pdev) struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); int i, ret; struct regulator_config config = {}; + const struct regulator_desc *supported_regulators; + unsigned int supported_regulators_size; + enum maxim_device_type dev_type = max14577->dev_type; - ret = max14577_regulator_dt_parse_pdata(pdev); + ret = max14577_regulator_dt_parse_pdata(pdev, dev_type); if (ret) return ret; + switch (dev_type) { + case MAXIM_DEVICE_TYPE_MAX77836: + supported_regulators = max77836_supported_regulators; + supported_regulators_size = ARRAY_SIZE(max77836_supported_regulators); + break; + case MAXIM_DEVICE_TYPE_MAX14577: + default: + supported_regulators = max14577_supported_regulators; + supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); + } + config.dev = &pdev->dev; - config.regmap = max14577->regmap; + config.driver_data = max14577; - for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) { + for (i = 0; i < supported_regulators_size; i++) { struct regulator_dev *regulator; /* * Index of supported_regulators[] is also the id and must @@ -220,17 +404,19 @@ static int max14577_regulator_probe(struct platform_device *pdev) config.init_data = pdata->regulators[i].initdata; config.of_node = pdata->regulators[i].of_node; } else { - config.init_data = match_init_data(i); - config.of_node = match_of_node(i); + config.init_data = match_init_data(i, dev_type); + config.of_node = match_of_node(i, dev_type); } + config.regmap = max14577_get_regmap(max14577, + supported_regulators[i].id); regulator = devm_regulator_register(&pdev->dev, &supported_regulators[i], &config); if (IS_ERR(regulator)) { ret = PTR_ERR(regulator); dev_err(&pdev->dev, - "Regulator init failed for ID %d with error: %d\n", - i, ret); + "Regulator init failed for %d/%s with error: %d\n", + i, supported_regulators[i].name, ret); return ret; } } @@ -238,20 +424,41 @@ static int max14577_regulator_probe(struct platform_device *pdev) return ret; } +static const struct platform_device_id max14577_regulator_id[] = { + { "max14577-regulator", MAXIM_DEVICE_TYPE_MAX14577, }, + { "max77836-regulator", MAXIM_DEVICE_TYPE_MAX77836, }, + { } +}; +MODULE_DEVICE_TABLE(platform, max14577_regulator_id); + static struct platform_driver max14577_regulator_driver = { .driver = { .owner = THIS_MODULE, .name = "max14577-regulator", }, - .probe = max14577_regulator_probe, + .probe = max14577_regulator_probe, + .id_table = max14577_regulator_id, }; static int __init max14577_regulator_init(void) { + /* Check for valid values for charger */ BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != MAX14577_REGULATOR_CURRENT_LIMIT_MAX); - BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX); + BUILD_BUG_ON(MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START + + MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != + MAX77836_REGULATOR_CURRENT_LIMIT_MAX); + /* Valid charger current values must be provided for each chipset */ + BUILD_BUG_ON(ARRAY_SIZE(maxim_charger_currents) != MAXIM_DEVICE_TYPE_NUM); + + BUILD_BUG_ON(ARRAY_SIZE(max14577_supported_regulators) != MAX14577_REGULATOR_NUM); + BUILD_BUG_ON(ARRAY_SIZE(max77836_supported_regulators) != MAX77836_REGULATOR_NUM); + + 
BUILD_BUG_ON(MAX77836_REGULATOR_LDO_VOLTAGE_MIN + + (MAX77836_REGULATOR_LDO_VOLTAGE_STEP * + (MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM - 1)) != + MAX77836_REGULATOR_LDO_VOLTAGE_MAX); return platform_driver_register(&max14577_regulator_driver); } @@ -264,6 +471,6 @@ static void __exit max14577_regulator_exit(void) module_exit(max14577_regulator_exit); MODULE_AUTHOR("Krzysztof Kozlowski "); -MODULE_DESCRIPTION("MAXIM 14577 regulator driver"); +MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:max14577-regulator"); diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index a557ae27d8a8..499253604026 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -261,9 +261,21 @@ enum max14577_charger_reg { #define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP 50000 #define MAX14577_REGULATOR_CURRENT_LIMIT_MAX 950000 +/* MAX77836 regulator current limits (as in CHGCTRL4 register), uA */ +#define MAX77836_REGULATOR_CURRENT_LIMIT_MIN 45000 +#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START 100000 +#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP 25000 +#define MAX77836_REGULATOR_CURRENT_LIMIT_MAX 475000 + /* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ #define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 +/* MAX77836 regulator LDOx voltage, uV */ +#define MAX77836_REGULATOR_LDO_VOLTAGE_MIN 800000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_MAX 3950000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_STEP 50000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM 64 + /* Slave addr = 0x46: PMIC */ enum max77836_pmic_reg { MAX77836_PMIC_REG_PMIC_ID = 0x20, @@ -298,6 +310,26 @@ enum max77836_pmic_reg { #define MAX77836_TOPSYS_INT_T120C_MASK BIT(MAX77836_TOPSYS_INT_T120C_SHIFT) #define MAX77836_TOPSYS_INT_T140C_MASK BIT(MAX77836_TOPSYS_INT_T140C_SHIFT) +/* LDO1/LDO2 CONFIG1 register */ +#define MAX77836_CNFG1_LDO_PWRMD_SHIFT 6 +#define MAX77836_CNFG1_LDO_TV_SHIFT 0 +#define MAX77836_CNFG1_LDO_PWRMD_MASK (0x3 << MAX77836_CNFG1_LDO_PWRMD_SHIFT) +#define MAX77836_CNFG1_LDO_TV_MASK (0x3f << MAX77836_CNFG1_LDO_TV_SHIFT) + +/* LDO1/LDO2 CONFIG2 register */ +#define MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT 7 +#define MAX77836_CNFG2_LDO_ALPMEN_SHIFT 6 +#define MAX77836_CNFG2_LDO_COMP_SHIFT 4 +#define MAX77836_CNFG2_LDO_POK_SHIFT 3 +#define MAX77836_CNFG2_LDO_ADE_SHIFT 1 +#define MAX77836_CNFG2_LDO_SS_SHIFT 0 +#define MAX77836_CNFG2_LDO_OVCLMPEN_MASK BIT(MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT) +#define MAX77836_CNFG2_LDO_ALPMEN_MASK BIT(MAX77836_CNFG2_LDO_ALPMEN_SHIFT) +#define MAX77836_CNFG2_LDO_COMP_MASK (0x3 << MAX77836_CNFG2_LDO_COMP_SHIFT) +#define MAX77836_CNFG2_LDO_POK_MASK BIT(MAX77836_CNFG2_LDO_POK_SHIFT) +#define MAX77836_CNFG2_LDO_ADE_MASK BIT(MAX77836_CNFG2_LDO_ADE_SHIFT) +#define MAX77836_CNFG2_LDO_SS_MASK BIT(MAX77836_CNFG2_LDO_SS_SHIFT) + /* Slave addr = 0x6C: Fuel-Gauge/Battery */ enum max77836_fg_reg { MAX77836_FG_REG_VCELL_MSB = 0x02, diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h index 08b449159fd1..c83fbed1c7b6 100644 --- a/include/linux/mfd/max14577.h +++ b/include/linux/mfd/max14577.h @@ -35,7 +35,17 @@ enum max14577_regulators { MAX14577_SAFEOUT = 0, MAX14577_CHARGER, - MAX14577_REG_MAX, + MAX14577_REGULATOR_NUM, +}; + +/* MAX77836 regulator IDs */ +enum max77836_regulators { + MAX77836_SAFEOUT = 0, + MAX77836_CHARGER, + MAX77836_LDO1, + MAX77836_LDO2, + + MAX77836_REGULATOR_NUM, }; struct max14577_regulator_platform_data { -- cgit v1.2.3 From 
a086f6a1ebc9d8d2d028b99e779ce0dbd9691dea Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 17 Apr 2014 17:06:12 +0800 Subject: Revert "KVM: Simplify kvm->tlbs_dirty handling" This reverts commit 5befdc385ddb2d5ae8995ad89004529a3acf58fc. Since we will allow flush tlb out of mmu-lock in the later patch Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/paging_tmpl.h | 7 +++---- include/linux/kvm_host.h | 4 +--- virt/kvm/kvm_main.c | 5 ++++- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 123efd3ec29f..410776528265 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -913,8 +913,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't * used by guest then tlbs are not flushed, so guest is allowed to access the * freed pages. - * We set tlbs_dirty to let the notifier know this change and delay the flush - * until such a case actually happens. + * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. */ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -943,7 +942,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return -EINVAL; if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { - vcpu->kvm->tlbs_dirty = true; + vcpu->kvm->tlbs_dirty++; continue; } @@ -958,7 +957,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) if (gfn != sp->gfns[i]) { drop_spte(vcpu->kvm, &sp->spt[i]); - vcpu->kvm->tlbs_dirty = true; + vcpu->kvm->tlbs_dirty++; continue; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 32d263f683dc..820fc2e1d9df 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -411,9 +411,7 @@ struct kvm { unsigned long mmu_notifier_seq; long mmu_notifier_count; #endif - /* Protected by mmu_lock */ - bool tlbs_dirty; - + long tlbs_dirty; struct list_head devices; }; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ea46d64c8e75..fa70c6e642b4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -186,9 +186,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) void kvm_flush_remote_tlbs(struct kvm *kvm) { + long dirty_count = kvm->tlbs_dirty; + + smp_mb(); if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) ++kvm->stat.remote_tlb_flush; - kvm->tlbs_dirty = false; + cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); } EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); -- cgit v1.2.3 From 5686a1e5aa436c49187a60052d5885fb1f541ce6 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Mon, 14 Apr 2014 15:47:01 +0200 Subject: bus: mvebu: pass the coherency availability information at init time Until now, the mvebu-mbus was guessing by itself whether hardware I/O coherency was available or not by poking into the Device Tree to see if the coherency fabric Device Tree node was present or not. However, on some upcoming SoCs, the presence or absence of the coherency fabric DT node isn't sufficient: in CONFIG_SMP, the coherency can be enabled, but not in !CONFIG_SMP. In order to clean this up, the mvebu_mbus_dt_init() function is extended to get a boolean argument telling whether coherency is enabled or not. Therefore, the logic to decide whether coherency is available or not now belongs to the core SoC code instead of the mvebu-mbus driver itself, which is much better. 
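As a rough sketch of what the SoC-side decision can now look like (coherency_fabric_present() is a placeholder name used only for illustration, not a real function), the caller can fold in constraints such as CONFIG_SMP that the mbus driver itself could not see:

	/* illustration only: the SoC code decides, the mbus driver just obeys */
	bool coherent = IS_ENABLED(CONFIG_SMP) && coherency_fabric_present();
	BUG_ON(mvebu_mbus_dt_init(coherent));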
Signed-off-by: Thomas Petazzoni Link: https://lkml.kernel.org/r/1397483228-25625-4-git-send-email-thomas.petazzoni@free-electrons.com Signed-off-by: Jason Cooper --- arch/arm/mach-kirkwood/board-dt.c | 2 +- arch/arm/mach-mvebu/board-v7.c | 2 +- arch/arm/mach-mvebu/dove.c | 2 +- arch/arm/mach-mvebu/kirkwood.c | 2 +- drivers/bus/mvebu-mbus.c | 11 +++-------- include/linux/mbus.h | 2 +- 6 files changed, 8 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 2801da49e2a3..ff18ff20f71f 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -195,7 +195,7 @@ static void __init kirkwood_dt_init(void) { kirkwood_disable_mbus_error_propagation(); - BUG_ON(mvebu_mbus_dt_init()); + BUG_ON(mvebu_mbus_dt_init(false)); #ifdef CONFIG_CACHE_FEROCEON_L2 feroceon_of_init(); diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index 333fca8fdc41..1730e0cdb6f6 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -58,7 +58,7 @@ static void __init mvebu_timer_and_clk_init(void) of_clk_init(NULL); clocksource_of_init(); coherency_init(); - BUG_ON(mvebu_mbus_dt_init()); + BUG_ON(mvebu_mbus_dt_init(coherency_available())); #ifdef CONFIG_CACHE_L2X0 l2x0_of_init(0, ~0UL); #endif diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c index 5e5a43624237..b50464ec1130 100644 --- a/arch/arm/mach-mvebu/dove.c +++ b/arch/arm/mach-mvebu/dove.c @@ -23,7 +23,7 @@ static void __init dove_init(void) #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(0); #endif - BUG_ON(mvebu_mbus_dt_init()); + BUG_ON(mvebu_mbus_dt_init(false)); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c index 120207fc36f1..a77e0bae9c55 100644 --- a/arch/arm/mach-mvebu/kirkwood.c +++ b/arch/arm/mach-mvebu/kirkwood.c @@ -169,7 +169,7 @@ static void __init kirkwood_dt_init(void) { kirkwood_disable_mbus_error_propagation(); - BUG_ON(mvebu_mbus_dt_init()); + BUG_ON(mvebu_mbus_dt_init(false)); #ifdef CONFIG_CACHE_FEROCEON_L2 feroceon_of_init(); diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 293e2e0a0a87..ff02fc90fc21 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -694,7 +694,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, phys_addr_t sdramwins_phys_base, size_t sdramwins_size) { - struct device_node *np; int win; mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); @@ -707,12 +706,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, return -ENOMEM; } - np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); - if (np) { - mbus->hw_io_coherency = 1; - of_node_put(np); - } - for (win = 0; win < mbus->soc->num_wins; win++) mvebu_mbus_disable_window(mbus, win); @@ -882,7 +875,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np, } } -int __init mvebu_mbus_dt_init(void) +int __init mvebu_mbus_dt_init(bool is_coherent) { struct resource mbuswins_res, sdramwins_res; struct device_node *np, *controller; @@ -920,6 +913,8 @@ int __init mvebu_mbus_dt_init(void) return -EINVAL; } + mbus_state.hw_io_coherency = is_coherent; + /* Get optional pcie-{mem,io}-aperture properties */ mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture, &mbus_state.pcie_io_aperture); diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 
345b8c53b897..550c88fb0267 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -73,6 +73,6 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size); int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, size_t mbus_size, phys_addr_t sdram_phys_base, size_t sdram_size); -int mvebu_mbus_dt_init(void); +int mvebu_mbus_dt_init(bool is_coherent); #endif /* __LINUX_MBUS_H */ -- cgit v1.2.3 From 1111244ff4493448c0ee66e814e20c6e81d3b93d Mon Sep 17 00:00:00 2001 From: Sangjung Woo Date: Mon, 21 Apr 2014 19:10:08 +0900 Subject: extcon: Add resource-managed extcon register function Add resource-managed extcon device register function for convenience. For example, if a extcon device is attached with new devm_extcon_dev_register(), that extcon device is automatically unregistered on driver detach. Signed-off-by: Sangjung Woo [Fix bug about devm_extcon_dev_match/release() and code clean by Chanwoo Choi] Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-class.c | 69 +++++++++++++++++++++++++++++++++++++++++++ include/linux/extcon.h | 13 ++++++++ 2 files changed, 82 insertions(+) (limited to 'include/linux') diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 7ab21aa6eaa1..f6df68989651 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c @@ -819,6 +819,75 @@ void extcon_dev_unregister(struct extcon_dev *edev) } EXPORT_SYMBOL_GPL(extcon_dev_unregister); +static void devm_extcon_dev_unreg(struct device *dev, void *res) +{ + extcon_dev_unregister(*(struct extcon_dev **)res); +} + +static int devm_extcon_dev_match(struct device *dev, void *res, void *data) +{ + struct extcon_dev **r = res; + + if (!r || !*r) { + WARN_ON(!r || !*r); + return 0; + } + + return *r == data; +} + +/** + * devm_extcon_dev_register() - Resource-managed extcon_dev_register() + * @dev: device to allocate extcon device + * @edev: the new extcon device to register + * + * Managed extcon_dev_register() function. If extcon device is attached with + * this function, that extcon device is automatically unregistered on driver + * detach. Internally this function calls extcon_dev_register() function. + * To get more information, refer that function. + * + * If extcon device is registered with this function and the device needs to be + * unregistered separately, devm_extcon_dev_unregister() should be used. + * + * Returns 0 if success or negaive error number if failure. + */ +int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev) +{ + struct extcon_dev **ptr; + int ret; + + ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = extcon_dev_register(edev); + if (ret) { + devres_free(ptr); + return ret; + } + + *ptr = edev; + devres_add(dev, ptr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_extcon_dev_register); + +/** + * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister() + * @dev: device the extcon belongs to + * @edev: the extcon device to unregister + * + * Unregister extcon device that is registered with devm_extcon_dev_register() + * function. 
+ */ +void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev) +{ + WARN_ON(devres_release(dev, devm_extcon_dev_unreg, + devm_extcon_dev_match, edev)); +} +EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister); + #ifdef CONFIG_OF /* * extcon_get_edev_by_phandle - Get the extcon device from devicetree diff --git a/include/linux/extcon.h b/include/linux/extcon.h index f488145bb2d4..548447be2d8f 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -185,6 +185,10 @@ struct extcon_specific_cable_nb { */ extern int extcon_dev_register(struct extcon_dev *edev); extern void extcon_dev_unregister(struct extcon_dev *edev); +extern int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev); +extern void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev); extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); /* @@ -254,6 +258,15 @@ static inline int extcon_dev_register(struct extcon_dev *edev) static inline void extcon_dev_unregister(struct extcon_dev *edev) { } +static inline int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev) +{ + return -EINVAL; +} + +static inline void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev) { } + static inline u32 extcon_get_state(struct extcon_dev *edev) { return 0; -- cgit v1.2.3 From 7ee4910ab31c4b1fafb7e4f273cbe9340ac953aa Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 31 Mar 2014 15:19:29 +0200 Subject: PCI: Remove old serial device IDs These IDs are no longer referenced since kernel 3.1 so I suppose we can remove them from pci_ids.h. Signed-off-by: Jean Delvare Signed-off-by: Bjorn Helgaas Acked-by: Greg Kroah-Hartman --- include/linux/pci_ids.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d4de24b4d4c6..7fa31731c854 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1631,8 +1631,6 @@ #define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 #define PCI_VENDOR_ID_SPECIALIX 0x11cb -#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 -#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 #define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 #define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 @@ -2874,7 +2872,6 @@ #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 #define PCI_VENDOR_ID_COMPUTONE 0x8e0e -#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 #define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 #define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e #define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 -- cgit v1.2.3 From 879eb9c3f9b854394c5a2014b9243c00eaa329f0 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Wed, 23 Apr 2014 09:58:25 -0500 Subject: tty_ldisc: add more limits to the @write_wakeup In the uart_handle_cts_change(), uart_write_wakeup() is called after we call @uart_port->ops->start_tx(). The Documentation/serial/driver tells us: ----------------------------------------------- start_tx(port) Start transmitting characters. Locking: port->lock taken. Interrupts: locally disabled. ----------------------------------------------- So when the uart_write_wakeup() is called, the port->lock is taken by the upper. See the following callstack: |_ uart_write_wakeup |_ tty_wakeup |_ ld->ops->write_wakeup With the port->lock held, we call the @write_wakeup. Some implemetation of the @write_wakeup does not notice that the port->lock is held, and it still tries to send data with uart_write() which will try to grab the prot->lock. 
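The safe pattern this patch documents is to defer the actual transmission from the @write_wakeup hook, for instance to a workqueue, so that uart_write() runs without port->lock held. A minimal sketch of that deferral (all foo_* names are hypothetical; foo_send_pending_data() stands in for whatever pushes data via uart_write()):

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct foo_ldisc {			/* hypothetical ldisc private data */
	struct tty_struct *tty;
	struct work_struct tx_work;
};

static void foo_send_pending_data(struct tty_struct *tty);	/* hypothetical, calls uart_write() */

static void foo_tx_work(struct work_struct *work)
{
	struct foo_ldisc *foo = container_of(work, struct foo_ldisc, tx_work);

	/* Process context, port->lock not held: uart_write() is safe here. */
	foo_send_pending_data(foo->tty);
}

/* .write_wakeup: called with port->lock held, so only schedule, never transmit. */
static void foo_write_wakeup(struct tty_struct *tty)
{
	struct foo_ldisc *foo = tty->disc_data;

	schedule_work(&foo->tx_work);
}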
A dead lock occurs, see the following log caught in the Bluetooth by uart: -------------------------------------------------------------------- BUG: spinlock lockup suspected on CPU#0, swapper/0/0 lock: 0xdc3f4410, .magic: dead4ead, .owner: swapper/0/0, .owner_cpu: 0 CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 3.10.17-16839-ge4a1bef #1320 [<80014cbc>] (unwind_backtrace+0x0/0x138) from [<8001251c>] (show_stack+0x10/0x14) [<8001251c>] (show_stack+0x10/0x14) from [<802816ac>] (do_raw_spin_lock+0x108/0x184) [<802816ac>] (do_raw_spin_lock+0x108/0x184) from [<806a22b0>] (_raw_spin_lock_irqsave+0x54/0x60) [<806a22b0>] (_raw_spin_lock_irqsave+0x54/0x60) from [<802f5754>] (uart_write+0x38/0xe0) [<802f5754>] (uart_write+0x38/0xe0) from [<80455270>] (hci_uart_tx_wakeup+0xa4/0x168) [<80455270>] (hci_uart_tx_wakeup+0xa4/0x168) from [<802dab18>] (tty_wakeup+0x50/0x5c) [<802dab18>] (tty_wakeup+0x50/0x5c) from [<802f81a4>] (imx_rtsint+0x50/0x80) [<802f81a4>] (imx_rtsint+0x50/0x80) from [<802f88f4>] (imx_int+0x158/0x17c) [<802f88f4>] (imx_int+0x158/0x17c) from [<8007abe0>] (handle_irq_event_percpu+0x50/0x194) [<8007abe0>] (handle_irq_event_percpu+0x50/0x194) from [<8007ad60>] (handle_irq_event+0x3c/0x5c) -------------------------------------------------------------------- This patch adds more limits to the @write_wakeup, the one who wants to implemet the @write_wakeup should follow the limits which avoid the deadlock. Signed-off-by: Huang Shijie Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- include/linux/tty_ldisc.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index add26da2faeb..00c9d688d7b7 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -92,7 +92,10 @@ * This function is called by the low-level tty driver to signal * that line discpline should try to send more characters to the * low-level driver for transmission. If the line discpline does - * not have any more data to send, it can just return. + * not have any more data to send, it can just return. If the line + * discipline does have some data to send, please arise a tasklet + * or workqueue to do the real data transfer. Do not send data in + * this hook, it may leads to a deadlock. * * int (*hangup)(struct tty_struct *) * -- cgit v1.2.3 From c1309040967e200d3ea6415ae54cf6a69d7ad996 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Mon, 31 Mar 2014 14:58:39 -0700 Subject: PCI: Use designated initialization in PCI_VDEVICE By using designated initialization in PCI_VDEVICE, like other similar macros, many "missing initializer" warnings that appear when compiling with W=2 can be silenced. Tested-by: Phil Schmitt Tested-by: Aaron Brown Tested-by: Kavindya Deegala Signed-off-by: Mark Rustad Signed-off-by: Jeff Kirsher Signed-off-by: Bjorn Helgaas --- include/linux/pci.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..a95aac7ad37f 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -680,8 +680,8 @@ struct pci_driver { /** * PCI_VDEVICE - macro used to describe a specific pci device in short form - * @vendor: the vendor name - * @device: the 16 bit PCI Device ID + * @vend: the vendor name + * @dev: the 16 bit PCI Device ID * * This macro is used to create a struct pci_device_id that matches a * specific PCI device. 
The subvendor, and subdevice fields will be set @@ -689,9 +689,9 @@ struct pci_driver { * private data. */ -#define PCI_VDEVICE(vendor, device) \ - PCI_VENDOR_ID_##vendor, (device), \ - PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#define PCI_VDEVICE(vend, dev) \ + .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 /* these external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI -- cgit v1.2.3 From 9aac5887595b765b6f64b2af08b785e82e095b57 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 18 Apr 2014 17:19:55 -0500 Subject: tty/serial: add generic serial earlycon This introduces generic earlycon infrastructure for serial devices based on the 8250 earlycon. This allows for supporting earlycon option with other serial devices. The earlycon output is enabled at the time early_params are processed. Only architectures that have fixmap support or have functional ioremap when early_params are processed are supported. This is the same restriction that the 8250 driver had. Signed-off-by: Rob Herring Cc: Jiri Slaby Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 7 ++ drivers/tty/serial/Makefile | 2 + drivers/tty/serial/earlycon.c | 152 ++++++++++++++++++++++++++++++++++++++++++ include/linux/serial_core.h | 16 +++++ 4 files changed, 177 insertions(+) create mode 100644 drivers/tty/serial/earlycon.c (limited to 'include/linux') diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 30530e47cdf0..9fb6028ad900 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -7,6 +7,13 @@ if TTY menu "Serial drivers" depends on HAS_IOMEM +config SERIAL_EARLYCON + bool + help + Support for early consoles with the earlycon parameter. This enables + the console before standard serial driver is probed. The console is + enabled when early_param is processed. + source "drivers/tty/serial/8250/Kconfig" comment "Non-8250 serial port support" diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 5f2a3f493ab9..28048178f308 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -5,6 +5,8 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o obj-$(CONFIG_SERIAL_21285) += 21285.o +obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o + # These Sparc drivers have to appear before others such as 8250 # which share ttySx minor node space. Otherwise console device # names change and other unplesantries. diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c new file mode 100644 index 000000000000..73bf1e21aae0 --- /dev/null +++ b/drivers/tty/serial/earlycon.c @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * Author: Rob Herring + * + * Based on 8250 earlycon: + * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include + +#ifdef CONFIG_FIX_EARLYCON_MEM +#include +#endif + +#include + +static struct console early_con = { + .name = "earlycon", + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, +}; + +static struct earlycon_device early_console_dev = { + .con = &early_con, +}; + +static void __iomem * __init earlycon_map(unsigned long paddr, size_t size) +{ + void __iomem *base; +#ifdef CONFIG_FIX_EARLYCON_MEM + set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK); + base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); + base += paddr & ~PAGE_MASK; +#else + base = ioremap(paddr, size); +#endif + if (!base) + pr_err("%s: Couldn't map 0x%llx\n", __func__, + (unsigned long long)paddr); + + return base; +} + +static int __init parse_options(struct earlycon_device *device, + char *options) +{ + struct uart_port *port = &device->port; + int mmio, mmio32, length, ret; + unsigned long addr; + + if (!options) + return -ENODEV; + + mmio = !strncmp(options, "mmio,", 5); + mmio32 = !strncmp(options, "mmio32,", 7); + if (mmio || mmio32) { + port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32); + options += mmio ? 5 : 7; + ret = kstrtoul(options, 0, &addr); + if (ret) + return ret; + port->mapbase = addr; + if (mmio32) + port->regshift = 2; + } else if (!strncmp(options, "io,", 3)) { + port->iotype = UPIO_PORT; + options += 3; + ret = kstrtoul(options, 0, &addr); + if (ret) + return ret; + port->iobase = addr; + mmio = 0; + } else if (!strncmp(options, "0x", 2)) { + port->iotype = UPIO_MEM; + ret = kstrtoul(options, 0, &addr); + if (ret) + return ret; + port->mapbase = addr; + } else { + return -EINVAL; + } + + port->uartclk = BASE_BAUD * 16; + + options = strchr(options, ','); + if (options) { + options++; + ret = kstrtouint(options, 0, &device->baud); + if (ret) + return ret; + length = min(strcspn(options, " ") + 1, + (size_t)(sizeof(device->options))); + strlcpy(device->options, options, length); + } + + if (mmio || mmio32) + pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n", + mmio32 ? "32" : "", + (unsigned long long)port->mapbase, + device->options); + else + pr_info("Early serial console at I/O port 0x%lx (options '%s')\n", + port->iobase, + device->options); + + return 0; +} + +int __init setup_earlycon(char *buf, const char *match, + int (*setup)(struct earlycon_device *, const char *)) +{ + int err; + size_t len; + struct uart_port *port = &early_console_dev.port; + + if (!buf || !match || !setup) + return 0; + + len = strlen(match); + if (strncmp(buf, match, len)) + return 0; + if (buf[len] && (buf[len] != ',')) + return 0; + + buf += len + 1; + + err = parse_options(&early_console_dev, buf); + /* On parsing error, pass the options buf to the setup function */ + if (!err) + buf = NULL; + + if (port->mapbase) + port->membase = earlycon_map(port->mapbase, 64); + + early_console_dev.con->data = &early_console_dev; + err = setup(&early_console_dev, buf); + if (err < 0) + return err; + if (!early_console_dev.con->write) + return -ENODEV; + + register_console(early_console_dev.con); + return 0; +} diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index f729be981da0..7a15b5b24c0b 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -285,6 +285,22 @@ static inline int uart_poll_timeout(struct uart_port *port) /* * Console helpers. 
*/ +struct earlycon_device { + struct console *con; + struct uart_port port; + char options[16]; /* e.g., 115200n8 */ + unsigned int baud; +}; +int setup_earlycon(char *buf, const char *match, + int (*setup)(struct earlycon_device *, const char *)); + +#define EARLYCON_DECLARE(name, func) \ +static int __init name ## _setup_earlycon(char *buf) \ +{ \ + return setup_earlycon(buf, __stringify(name), func); \ +} \ +early_param("earlycon", name ## _setup_earlycon); + struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); void uart_parse_options(char *options, int *baud, int *parity, int *bits, -- cgit v1.2.3 From 38535201633077cbaf8b32886b5e3005b36c9024 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 25 Apr 2014 02:32:53 -0700 Subject: blk-mq: respect rq_affinity The blk-mq code is using it's own version of the I/O completion affinity tunables, which causes a few issues: - the rq_affinity sysfs file doesn't work for blk-mq devices, even if it still is present, thus breaking existing tuning setups. - the rq_affinity = 1 mode, which is the defauly for legacy request based drivers isn't implemented at all. - blk-mq drivers don't implement any completion affinity with the default flag settings. This patches removes the blk-mq ipi_redirect flag and sysfs file, as well as the internal BLK_MQ_F_SHOULD_IPI flag and replaces it with code that respects the queue-wide rq_affinity flags and also implements the rq_affinity = 1 mode. This means I/O completion affinity can now only be tuned block-queue wide instead of per context, which seems more sensible to me anyway. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 42 ------------------------------------------ block/blk-mq.c | 8 ++++++-- block/blk-mq.h | 1 - include/linux/blk-mq.h | 1 - 4 files changed, 6 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 9176a6984857..8145b5b25b4b 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -203,42 +203,6 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, return ret; } -static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) -{ - ssize_t ret; - - spin_lock(&hctx->lock); - ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI)); - spin_unlock(&hctx->lock); - - return ret; -} - -static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx, - const char *page, size_t len) -{ - struct blk_mq_ctx *ctx; - unsigned long ret; - unsigned int i; - - if (kstrtoul(page, 10, &ret)) { - pr_err("blk-mq-sysfs: invalid input '%s'\n", page); - return -EINVAL; - } - - spin_lock(&hctx->lock); - if (ret) - hctx->flags |= BLK_MQ_F_SHOULD_IPI; - else - hctx->flags &= ~BLK_MQ_F_SHOULD_IPI; - spin_unlock(&hctx->lock); - - hctx_for_each_ctx(hctx, ctx, i) - ctx->ipi_redirect = !!ret; - - return len; -} - static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { return blk_mq_tag_sysfs_show(hctx->tags, page); @@ -307,11 +271,6 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, }; -static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = { - .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR}, - .show = blk_mq_hw_sysfs_ipi_show, - .store = blk_mq_hw_sysfs_ipi_store, -}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { .attr = {.name = "tags", .mode = S_IRUGO 
}, .show = blk_mq_hw_sysfs_tags_show, @@ -326,7 +285,6 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_dispatched.attr, &blk_mq_hw_sysfs_pending.attr, - &blk_mq_hw_sysfs_ipi.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, NULL, diff --git a/block/blk-mq.c b/block/blk-mq.c index a84112c94e74..f2e92eb92803 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -326,15 +326,19 @@ static void __blk_mq_complete_request_remote(void *data) void __blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; + bool shared = false; int cpu; - if (!ctx->ipi_redirect) { + if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { rq->q->softirq_done_fn(rq); return; } cpu = get_cpu(); - if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; diff --git a/block/blk-mq.h b/block/blk-mq.h index b41a784de50d..1ae364ceaf8b 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -11,7 +11,6 @@ struct blk_mq_ctx { unsigned int cpu; unsigned int index_hw; - unsigned int ipi_redirect; /* incremented at dispatch time */ unsigned long rq_dispatched[2]; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ab469d525894..3b561d651a02 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -122,7 +122,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, - BLK_MQ_F_SHOULD_IPI = 1 << 2, BLK_MQ_S_STOPPED = 0, -- cgit v1.2.3 From 7d568a8383bbb9c1f5167781075906acb2bb1550 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 9 Apr 2014 11:07:30 -0400 Subject: kernfs: implement kernfs_root->supers list Currently, there's no way to find out which super_blocks are associated with a given kernfs_root. Let's implement it - the planned inotify extension to kernfs_notify() needs it. Make kernfs_super_info point back to the super_block and chain it at kernfs_root->supers. Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/dir.c | 1 + fs/kernfs/kernfs-internal.h | 5 +++++ fs/kernfs/mount.c | 11 +++++++++++ include/linux/kernfs.h | 4 ++++ 4 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 78f3403300af..43aa97988c31 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -711,6 +711,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, return ERR_PTR(-ENOMEM); ida_init(&root->ino_ida); + INIT_LIST_HEAD(&root->supers); kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, KERNFS_DIR); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 8be13b2a079b..dc84a3ef9ca2 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -49,6 +49,8 @@ static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn) * mount.c */ struct kernfs_super_info { + struct super_block *sb; + /* * The root associated with this super_block. Each super_block is * identified by the root and ns it's associated with. @@ -62,6 +64,9 @@ struct kernfs_super_info { * an array and compare kernfs_node tag against every entry. 
*/ const void *ns; + + /* anchored at kernfs_root->supers, protected by kernfs_mutex */ + struct list_head node; }; #define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info)) diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 6a5f04ac8704..f25a7c0c3cdc 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -68,6 +68,7 @@ static int kernfs_fill_super(struct super_block *sb) struct inode *inode; struct dentry *root; + info->sb = sb; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = SYSFS_MAGIC; @@ -166,12 +167,18 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, *new_sb_created = !sb->s_root; if (!sb->s_root) { + struct kernfs_super_info *info = kernfs_info(sb); + error = kernfs_fill_super(sb); if (error) { deactivate_locked_super(sb); return ERR_PTR(error); } sb->s_flags |= MS_ACTIVE; + + mutex_lock(&kernfs_mutex); + list_add(&info->node, &root->supers); + mutex_unlock(&kernfs_mutex); } return dget(sb->s_root); @@ -190,6 +197,10 @@ void kernfs_kill_sb(struct super_block *sb) struct kernfs_super_info *info = kernfs_info(sb); struct kernfs_node *root_kn = sb->s_root->d_fsdata; + mutex_lock(&kernfs_mutex); + list_del(&info->node); + mutex_unlock(&kernfs_mutex); + /* * Remove the superblock from fs_supers/s_instances * so we can't find it, before freeing kernfs_super_info. diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index b0122dc6f96a..589318b73e61 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -144,6 +144,10 @@ struct kernfs_root { /* private fields, do not use outside kernfs proper */ struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; + + /* list of kernfs_super_info of this root, protected by kernfs_mutex */ + struct list_head supers; + wait_queue_head_t deactivate_waitq; }; -- cgit v1.2.3 From 86d56134f1b67d0c18025ba5cade95c048ed528d Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 10 Apr 2014 14:09:31 -0700 Subject: kobject: Make support for uevent_helper optional. Support for uevent_helper, aka hotplug, is not required on many systems these days but it can still be enabled via sysfs or sysctl. Reported-by: Darren Shepherd Signed-off-by: Michael Marineau Signed-off-by: Greg Kroah-Hartman --- drivers/base/Kconfig | 17 +++++++++++------ include/linux/kobject.h | 2 ++ kernel/ksysfs.c | 5 ++++- kernel/sysctl.c | 4 ++-- lib/kobject_uevent.c | 6 ++++++ 5 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 8fa8deab6449..4b7b4522b64f 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -1,10 +1,10 @@ menu "Generic Driver Options" -config UEVENT_HELPER_PATH - string "path to uevent helper" - default "" +config UEVENT_HELPER + bool "Support for uevent helper" + default y help - Path to uevent helper program forked by the kernel for + The uevent helper program is forked by the kernel for every uevent. Before the switch to the netlink-based uevent source, this was used to hook hotplug scripts into kernel device events. It @@ -15,8 +15,13 @@ config UEVENT_HELPER_PATH that it creates a high system load, or on smaller systems it is known to create out-of-memory situations during bootup. - To disable user space helper program execution at early boot - time specify an empty string here. 
This setting can be altered +config UEVENT_HELPER_PATH + string "path to uevent helper" + depends on UEVENT_HELPER + default "" + help + To disable user space helper program execution at by default + specify an empty string here. This setting can still be altered via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper later at runtime. diff --git a/include/linux/kobject.h b/include/linux/kobject.h index f896a33e8341..2d61b909f414 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -32,8 +32,10 @@ #define UEVENT_NUM_ENVP 32 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ +#ifdef CONFIG_UEVENT_HELPER /* path to the userspace helper executed on an event */ extern char uevent_helper[]; +#endif /* counter to tag the uevent, read only except for the kobject core */ extern u64 uevent_seqnum; diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 2495a9b14ac8..6683ccef9fff 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -37,6 +37,7 @@ static ssize_t uevent_seqnum_show(struct kobject *kobj, } KERNEL_ATTR_RO(uevent_seqnum); +#ifdef CONFIG_UEVENT_HELPER /* uevent helper program, used during early boot */ static ssize_t uevent_helper_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -56,7 +57,7 @@ static ssize_t uevent_helper_store(struct kobject *kobj, return count; } KERNEL_ATTR_RW(uevent_helper); - +#endif #ifdef CONFIG_PROFILING static ssize_t profiling_show(struct kobject *kobj, @@ -189,7 +190,9 @@ EXPORT_SYMBOL_GPL(kernel_kobj); static struct attribute * kernel_attrs[] = { &fscaps_attr.attr, &uevent_seqnum_attr.attr, +#ifdef CONFIG_UEVENT_HELPER &uevent_helper_attr.attr, +#endif #ifdef CONFIG_PROFILING &profiling_attr.attr, #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 74f5b580fe34..bc966a8ffc3e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -643,7 +643,7 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, #endif - +#ifdef CONFIG_UEVENT_HELPER { .procname = "hotplug", .data = &uevent_helper, @@ -651,7 +651,7 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dostring, }, - +#endif #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 4e3bd71bd949..9ebf9e20de53 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -29,7 +29,9 @@ u64 uevent_seqnum; +#ifdef CONFIG_UEVENT_HELPER char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; +#endif #ifdef CONFIG_NET struct uevent_sock { struct list_head list; @@ -109,6 +111,7 @@ static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) } #endif +#ifdef CONFIG_UEVENT_HELPER static int kobj_usermode_filter(struct kobject *kobj) { const struct kobj_ns_type_operations *ops; @@ -147,6 +150,7 @@ static void cleanup_uevent_env(struct subprocess_info *info) { kfree(info->data); } +#endif /** * kobject_uevent_env - send an uevent with environmental data @@ -323,6 +327,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, #endif mutex_unlock(&uevent_sock_mutex); +#ifdef CONFIG_UEVENT_HELPER /* call uevent_helper, usually only enabled during early boot */ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { struct subprocess_info *info; @@ -347,6 +352,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, env = NULL; /* freed by cleanup_uevent_env */ } } +#endif exit: kfree(devpath); -- cgit v1.2.3 From ea7e586bdd331fd6fba2b6f9fd3777928c2814d8 Mon Sep 17 
00:00:00 2001 From: Linus Walleij Date: Sun, 13 Apr 2014 20:08:00 +0100 Subject: iio: st_sensors: move regulator retrieveal to core Currently the pressure sensor has code to retrieve and enable two regulators for Vdd and Vdd IO, but actually these voltage inputs are found on all of these ST sensors, so move the regulator handling to the core and make sure all the ST sensors call these functions on probe() and remove() to enable/disable power. Here also mover over to obtaining the regulator from the *parent* device of the IIO device, as the IIO device is created on-the-fly in this very subsystem it very unlikely evert have any regulators attached to it whatsoever. It is much more likely that the parent is a platform device, possibly instantiated from a device tree, which in turn have Vdd and Vdd IO supplied assigned to it. Cc: Lee Jones Cc: Denis CIOCCA Signed-off-by: Linus Walleij Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_core.c | 4 +++ drivers/iio/common/st_sensors/st_sensors_core.c | 37 +++++++++++++++++++++++ drivers/iio/gyro/st_gyro_core.c | 4 +++ drivers/iio/magnetometer/st_magn_core.c | 4 +++ drivers/iio/pressure/st_pressure_core.c | 39 ++----------------------- include/linux/iio/common/st_sensors.h | 4 +++ 6 files changed, 55 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 4e06fcf5b891..a2abf7c2ce3b 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -459,6 +459,8 @@ int st_accel_common_probe(struct iio_dev *indio_dev, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &accel_info; + st_sensors_power_enable(indio_dev); + err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_accel_sensors), st_accel_sensors); if (err < 0) @@ -515,6 +517,8 @@ void st_accel_common_remove(struct iio_dev *indio_dev) { struct st_sensor_data *adata = iio_priv(indio_dev); + st_sensors_power_disable(indio_dev); + iio_device_unregister(indio_dev); if (adata->get_irq_data_ready(indio_dev) > 0) st_sensors_deallocate_trigger(indio_dev); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 7ba1ef270213..e8b932fed70e 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -198,6 +199,42 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable) } EXPORT_SYMBOL(st_sensors_set_axis_enable); +void st_sensors_power_enable(struct iio_dev *indio_dev) +{ + struct st_sensor_data *pdata = iio_priv(indio_dev); + int err; + + /* Regulators not mandatory, but if requested we should enable them. 
*/ + pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd"); + if (!IS_ERR(pdata->vdd)) { + err = regulator_enable(pdata->vdd); + if (err != 0) + dev_warn(&indio_dev->dev, + "Failed to enable specified Vdd supply\n"); + } + + pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio"); + if (!IS_ERR(pdata->vdd_io)) { + err = regulator_enable(pdata->vdd_io); + if (err != 0) + dev_warn(&indio_dev->dev, + "Failed to enable specified Vdd_IO supply\n"); + } +} +EXPORT_SYMBOL(st_sensors_power_enable); + +void st_sensors_power_disable(struct iio_dev *indio_dev) +{ + struct st_sensor_data *pdata = iio_priv(indio_dev); + + if (!IS_ERR(pdata->vdd)) + regulator_disable(pdata->vdd); + + if (!IS_ERR(pdata->vdd_io)) + regulator_disable(pdata->vdd_io); +} +EXPORT_SYMBOL(st_sensors_power_disable); + static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata) { diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index bc71f4d1e2ce..ed74a9069989 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -311,6 +311,8 @@ int st_gyro_common_probe(struct iio_dev *indio_dev, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &gyro_info; + st_sensors_power_enable(indio_dev); + err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors); if (err < 0) @@ -363,6 +365,8 @@ void st_gyro_common_remove(struct iio_dev *indio_dev) { struct st_sensor_data *gdata = iio_priv(indio_dev); + st_sensors_power_disable(indio_dev); + iio_device_unregister(indio_dev); if (gdata->get_irq_data_ready(indio_dev) > 0) st_sensors_deallocate_trigger(indio_dev); diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8e33a7682d33..240a21dd0c61 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -355,6 +355,8 @@ int st_magn_common_probe(struct iio_dev *indio_dev, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &magn_info; + st_sensors_power_enable(indio_dev); + err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_magn_sensors), st_magn_sensors); if (err < 0) @@ -406,6 +408,8 @@ void st_magn_common_remove(struct iio_dev *indio_dev) { struct st_sensor_data *mdata = iio_priv(indio_dev); + st_sensors_power_disable(indio_dev); + iio_device_unregister(indio_dev); if (mdata->get_irq_data_ready(indio_dev) > 0) st_sensors_deallocate_trigger(indio_dev); diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 013becbe8f47..cd7e01f3a93b 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -387,40 +386,6 @@ static const struct iio_trigger_ops st_press_trigger_ops = { #define ST_PRESS_TRIGGER_OPS NULL #endif -static void st_press_power_enable(struct iio_dev *indio_dev) -{ - struct st_sensor_data *pdata = iio_priv(indio_dev); - int err; - - /* Regulators not mandatory, but if requested we should enable them. 
*/ - pdata->vdd = devm_regulator_get_optional(&indio_dev->dev, "vdd"); - if (!IS_ERR(pdata->vdd)) { - err = regulator_enable(pdata->vdd); - if (err != 0) - dev_warn(&indio_dev->dev, - "Failed to enable specified Vdd supply\n"); - } - - pdata->vdd_io = devm_regulator_get_optional(&indio_dev->dev, "vddio"); - if (!IS_ERR(pdata->vdd_io)) { - err = regulator_enable(pdata->vdd_io); - if (err != 0) - dev_warn(&indio_dev->dev, - "Failed to enable specified Vdd_IO supply\n"); - } -} - -static void st_press_power_disable(struct iio_dev *indio_dev) -{ - struct st_sensor_data *pdata = iio_priv(indio_dev); - - if (!IS_ERR(pdata->vdd)) - regulator_disable(pdata->vdd); - - if (!IS_ERR(pdata->vdd_io)) - regulator_disable(pdata->vdd_io); -} - int st_press_common_probe(struct iio_dev *indio_dev, struct st_sensors_platform_data *plat_data) { @@ -431,7 +396,7 @@ int st_press_common_probe(struct iio_dev *indio_dev, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &press_info; - st_press_power_enable(indio_dev); + st_sensors_power_enable(indio_dev); err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_press_sensors), @@ -493,7 +458,7 @@ void st_press_common_remove(struct iio_dev *indio_dev) { struct st_sensor_data *pdata = iio_priv(indio_dev); - st_press_power_disable(indio_dev); + st_sensors_power_disable(indio_dev); iio_device_unregister(indio_dev); if (pdata->get_irq_data_ready(indio_dev) > 0) diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 3c005eb3a0a4..96f51f0e0096 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -269,6 +269,10 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); +void st_sensors_power_enable(struct iio_dev *indio_dev); + +void st_sensors_power_disable(struct iio_dev *indio_dev); + int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); -- cgit v1.2.3 From 1c8732bb0355b929b09173464cdca7df4d516f89 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 9 Apr 2014 13:34:39 +0200 Subject: gpio: support threaded interrupts in irqchip helpers Some off-chip GPIO expanders need to be communicated by I2C or SPI traffic, but may still support IRQs. By the sleeping nature of such buses, such IRQ handlers need to be threaded. Support such handlers in the gpiochip irqchip helpers by flagging IRQs as threaded if the .can_sleep property of the gpiochip is true. Helpfully deny registration of chained IRQ handlers if the .can_sleep property is set, as such chips will invariably need a nested handler rather than a chained handler. 
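In practice a sleeping expander driver is expected to combine the helpers with its own threaded parent handler. A minimal sketch, assuming the gpiolib irqchip helper API and gpio_chip fields added by this series (all foo_* names are hypothetical; foo_read_status() stands in for a status-register read over I2C):

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

struct foo_expander {			/* hypothetical I2C GPIO expander */
	struct gpio_chip chip;
};

static struct irq_chip foo_irq_chip;	/* mask/unmask/bus_lock callbacks omitted */

static unsigned long foo_read_status(struct foo_expander *foo);	/* hypothetical, sleeps on I2C */

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_expander *foo = data;
	unsigned long pending = foo_read_status(foo);
	int bit;

	for_each_set_bit(bit, &pending, foo->chip.ngpio)
		handle_nested_irq(irq_find_mapping(foo->chip.irqdomain, bit));

	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct device *dev, struct foo_expander *foo, int parent_irq)
{
	int ret;

	/* can_sleep now also makes the helper mark child IRQs as nested threads. */
	foo->chip.can_sleep = true;

	ret = gpiochip_irqchip_add(&foo->chip, &foo_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* The parent line is requested as a threaded (oneshot) IRQ, never chained. */
	return devm_request_threaded_irq(dev, parent_irq, NULL, foo_irq_thread,
					 IRQF_ONESHOT, "foo-expander", foo);
}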
Cc: Thomas Gleixner Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 12 ++++++++++++ include/linux/gpio/driver.h | 5 ++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f48817d97480..c12fe9dfd2db 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1363,6 +1363,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, int parent_irq, irq_flow_handler_t parent_handler) { + if (gpiochip->can_sleep) { + chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n"); + return; + } + irq_set_chained_handler(parent_irq, parent_handler); /* * The parent irqchip is already using the chip_data for this @@ -1389,6 +1394,9 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_set_chip_data(irq, chip); irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler); + /* Chips that can sleep need nested thread handlers */ + if (chip->can_sleep) + irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else @@ -1401,9 +1409,13 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { + struct gpio_chip *chip = d->host_data; + #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif + if (chip->can_sleep) + irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1827b43966d9..573e4f3243d0 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -51,7 +51,10 @@ struct seq_file; * format specifier for an unsigned int. It is substituted by the actual * number of the gpio. * @can_sleep: flag must be set iff get()/set() methods sleep, as they - * must while accessing GPIO expander chips over I2C or SPI + * must while accessing GPIO expander chips over I2C or SPI. This + * implies that if the chip supports IRQs, these IRQs need to be threaded + * as the chip access may sleep when e.g. reading out the IRQ status + * registers. * @exported: flags if the gpiochip is exported for use from sysfs. Private. * * A gpio_chip can help platforms abstract various sources of GPIOs so -- cgit v1.2.3 From 5c81f2078b7be63be49916128cc86bc17be7f348 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 9 Apr 2014 12:50:40 +0200 Subject: gpio: tc3589x: get rid of static IRQ base The static IRQ base is not used on any platforms with this chip (only Ux500). Get rid of it forever, and rely on dynamic IRQ descriptor allocation. 
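Concretely, "dynamic IRQ descriptor allocation" means that with an irq_base of 0 irq_domain_add_simple() sets up a linear domain and descriptors only come into existence when a mapping is created. A minimal consumer-side sketch (foo_handle_hwirq() is hypothetical):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/* Translate a hardware line to a virq on demand; the descriptor is
 * allocated by irq_create_mapping() the first time the line is seen. */
static void foo_handle_hwirq(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned int virq = irq_create_mapping(domain, hwirq);

	if (virq)
		handle_nested_irq(virq);
}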
Cc: Samuel Ortiz Cc: Lee Jones Signed-off-by: Linus Walleij --- drivers/gpio/gpio-tc3589x.c | 10 +--------- include/linux/mfd/tc3589x.h | 1 - 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 113e50cb1f59..4b0d8ccbe561 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -32,9 +32,6 @@ struct tc3589x_gpio { struct device *dev; struct mutex irq_lock; struct irq_domain *domain; - - int irq_base; - /* Caches of interrupt control registers for bus_lock */ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; @@ -290,8 +287,6 @@ static struct irq_domain_ops tc3589x_irq_ops = { static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio, struct device_node *np) { - int base = tc3589x_gpio->irq_base; - /* * If this results in a linear domain, irq_create_mapping() will * take care of allocating IRQ descriptors at runtime. When a base @@ -299,7 +294,7 @@ static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio, * domain is instantiated. */ tc3589x_gpio->domain = irq_domain_add_simple(np, - tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops, + tc3589x_gpio->chip.ngpio, 0, &tc3589x_irq_ops, tc3589x_gpio); if (!tc3589x_gpio->domain) { dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n"); @@ -348,9 +343,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) tc3589x_gpio->chip.of_node = np; #endif - tc3589x_gpio->irq_base = tc3589x->irq_base ? - tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0; - /* Bring the GPIO module out of reset */ ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL, TC3589x_RSTCTRL_GPIRST, 0); diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index 6b8e1ff4672b..e6088c2e2092 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h @@ -111,7 +111,6 @@ enum tx3589x_block { #define TC3589x_INT_PORIRQ 7 #define TC3589x_NR_INTERNAL_IRQS 8 -#define TC3589x_INT_GPIO(x) (TC3589x_NR_INTERNAL_IRQS + (x)) struct tc3589x { struct mutex lock; -- cgit v1.2.3 From a9af65223b41cec60cd44fa95a93d10149deb143 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 24 Apr 2014 19:46:49 +0900 Subject: extcon: Add extcon_dev_allocate/free() to control the memory of extcon device This patch add APIs to control the extcon device on extcon provider driver. The extcon_dev_allocate() allocates the memory of extcon device and initializes supported cables. And then extcon_dev_free() decrement the reference of the device of extcon device and free the memory of the extcon device. This APIs must need to implement devm_extcon_dev_allocate()/free() APIs. Signed-off-by: Chanwoo Choi Reviewed-by: Felipe Balbi --- drivers/extcon/extcon-class.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/extcon.h | 13 +++++++++++++ 2 files changed, 49 insertions(+) (limited to 'include/linux') diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index f6df68989651..654ed52e17c2 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c @@ -565,6 +565,42 @@ static void dummy_sysfs_dev_release(struct device *dev) { } +/* + * extcon_dev_allocate() - Allocate the memory of extcon device. + * @supported_cable: Array of supported cable names ending with NULL. + * If supported_cable is NULL, cable name related APIs + * are disabled. 
+ * + * This function allocates the memory for extcon device without allocating + * memory in each extcon provider driver and initialize default setting for + * extcon device. + * + * Return the pointer of extcon device if success or ERR_PTR(err) if fail + */ +struct extcon_dev *extcon_dev_allocate(const char **supported_cable) +{ + struct extcon_dev *edev; + + edev = kzalloc(sizeof(*edev), GFP_KERNEL); + if (!edev) + return ERR_PTR(-ENOMEM); + + edev->max_supported = 0; + edev->supported_cable = supported_cable; + + return edev; +} + +/* + * extcon_dev_free() - Free the memory of extcon device. + * @edev: the extcon device to free + */ +void extcon_dev_free(struct extcon_dev *edev) +{ + kfree(edev); +} +EXPORT_SYMBOL_GPL(extcon_dev_free); + /** * extcon_dev_register() - Register a new extcon device * @edev : the new extcon device (should be allocated before calling) diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 548447be2d8f..15361a2f2f19 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -191,6 +191,12 @@ extern void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev); extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); +/* + * Following APIs control the memory of extcon device. + */ +extern struct extcon_dev *extcon_dev_allocate(const char **cables); +extern void extcon_dev_free(struct extcon_dev *edev); + /* * get/set/update_state access the 32b encoded state value, which represents * states of all possible cables of the multistate port. For example, if one @@ -267,6 +273,13 @@ static inline int devm_extcon_dev_register(struct device *dev, static inline void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev) { } +static inline struct extcon_dev *extcon_dev_allocate(const char **cables) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void extcon_dev_free(struct extcon_dev *edev) { } + static inline u32 extcon_get_state(struct extcon_dev *edev) { return 0; -- cgit v1.2.3 From 739ba1bfdb15e773999aafddbd6c59b5737797a0 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 24 Apr 2014 20:12:15 +0900 Subject: extcon: Add devm_extcon_dev_allocate/free to manage the resource of extcon device This patch add device managed devm_extcon_dev_{allocate,free} to automatically free the memory of extcon_dev structure without handling free operation. Signed-off-by: Chanwoo Choi Reviewed-by: Felipe Balbi --- drivers/extcon/extcon-class.c | 70 +++++++++++++++++++++++++++++++++++-------- include/linux/extcon.h | 11 +++++++ 2 files changed, 69 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 654ed52e17c2..18d42c0e4581 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c @@ -601,6 +601,64 @@ void extcon_dev_free(struct extcon_dev *edev) } EXPORT_SYMBOL_GPL(extcon_dev_free); +static int devm_extcon_dev_match(struct device *dev, void *res, void *data) +{ + struct extcon_dev **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; +} + +static void devm_extcon_dev_release(struct device *dev, void *res) +{ + extcon_dev_free(*(struct extcon_dev **)res); +} + +/** + * devm_extcon_dev_allocate - Allocate managed extcon device + * @dev: device owning the extcon device being created + * @supported_cable: Array of supported cable names ending with NULL. + * If supported_cable is NULL, cable name related APIs + * are disabled. 
+ * + * This function manages automatically the memory of extcon device using device + * resource management and simplify the control of freeing the memory of extcon + * device. + * + * Returns the pointer memory of allocated extcon_dev if success + * or ERR_PTR(err) if fail + */ +struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const char **supported_cable) +{ + struct extcon_dev **ptr, *edev; + + ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + edev = extcon_dev_allocate(supported_cable); + if (IS_ERR(edev)) { + devres_free(ptr); + return edev; + } + + *ptr = edev; + devres_add(dev, ptr); + + return edev; +} +EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate); + +void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev) +{ + WARN_ON(devres_release(dev, devm_extcon_dev_release, + devm_extcon_dev_match, edev)); +} +EXPORT_SYMBOL_GPL(devm_extcon_dev_free); + /** * extcon_dev_register() - Register a new extcon device * @edev : the new extcon device (should be allocated before calling) @@ -860,18 +918,6 @@ static void devm_extcon_dev_unreg(struct device *dev, void *res) extcon_dev_unregister(*(struct extcon_dev **)res); } -static int devm_extcon_dev_match(struct device *dev, void *res, void *data) -{ - struct extcon_dev **r = res; - - if (!r || !*r) { - WARN_ON(!r || !*r); - return 0; - } - - return *r == data; -} - /** * devm_extcon_dev_register() - Resource-managed extcon_dev_register() * @dev: device to allocate extcon device diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 15361a2f2f19..36f49c405dfb 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -196,6 +196,9 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); */ extern struct extcon_dev *extcon_dev_allocate(const char **cables); extern void extcon_dev_free(struct extcon_dev *edev); +extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const char **cables); +extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); /* * get/set/update_state access the 32b encoded state value, which represents @@ -280,6 +283,14 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables) static inline void extcon_dev_free(struct extcon_dev *edev) { } +static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const char **cables) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_extcon_dev_free(struct extcon_dev *edev) { } + static inline u32 extcon_get_state(struct extcon_dev *edev) { return 0; -- cgit v1.2.3 From 3f79a3fb5f41e8f2229e5bf8aa725eaa79686f14 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Mon, 21 Apr 2014 20:44:53 +0900 Subject: extcon: palmas: Use devm_extcon_dev_allocate for extcon_dev This patch use devm_extcon_dev_allocate() to simplify the memory control of extcon device. 
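Taken together with the two preceding patches, the provider-side boilerplate reduces to a pair of devm calls. A minimal probe sketch (foo_* names and the cable list are hypothetical, mirroring the palmas usage):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static const char *foo_cables[] = {	/* hypothetical provider's cables */
	"USB",
	"USB-HOST",
	NULL,
};

static int foo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	/* Both the memory and the registration follow the device lifetime. */
	edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, edev);
	return 0;
}

/* No explicit .remove needed for the extcon bits: devres unwinds both steps. */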
Cc: Graeme Gregory Cc: Kishon Vijay Abraham I Cc: Felipe Balbi Cc: Samuel Ortiz Cc: Lee Jones Signed-off-by: Chanwoo Choi Acked-by: Lee Jones Acked-by: Felipe Balbi Tested-by: Felipe Balbi --- drivers/extcon/extcon-palmas.c | 35 ++++++++++++++++++++--------------- include/linux/mfd/palmas.h | 2 +- 2 files changed, 21 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 1a770e0ee9ae..7417ce84eb2d 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -57,7 +57,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) { if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) { palmas_usb->linkstat = PALMAS_USB_STATE_VBUS; - extcon_set_cable_state(&palmas_usb->edev, "USB", true); + extcon_set_cable_state(palmas_usb->edev, "USB", true); dev_info(palmas_usb->dev, "USB cable is attached\n"); } else { dev_dbg(palmas_usb->dev, @@ -66,7 +66,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) } else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) { if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) { palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; - extcon_set_cable_state(&palmas_usb->edev, "USB", false); + extcon_set_cable_state(palmas_usb->edev, "USB", false); dev_info(palmas_usb->dev, "USB cable is detached\n"); } else { dev_dbg(palmas_usb->dev, @@ -93,7 +93,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) PALMAS_USB_ID_INT_LATCH_CLR, PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); palmas_usb->linkstat = PALMAS_USB_STATE_ID; - extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); + extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { @@ -101,17 +101,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) PALMAS_USB_ID_INT_LATCH_CLR, PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; - extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false); + extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) && (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) { palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; - extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false); + extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { palmas_usb->linkstat = PALMAS_USB_STATE_ID; - extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); + extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); } @@ -187,15 +187,20 @@ static int palmas_usb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, palmas_usb); - palmas_usb->edev.supported_cable = palmas_extcon_cable; - palmas_usb->edev.dev.parent = palmas_usb->dev; - palmas_usb->edev.name = kstrdup(node->name, GFP_KERNEL); - palmas_usb->edev.mutually_exclusive = mutually_exclusive; + palmas_usb->edev = devm_extcon_dev_allocate(&pdev->dev, + palmas_extcon_cable); + if (IS_ERR(palmas_usb->edev)) { + dev_err(&pdev->dev, "failed to allocate extcon 
device\n"); + return -ENOMEM; + } + palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL); + palmas_usb->edev->dev.parent = palmas_usb->dev; + palmas_usb->edev->mutually_exclusive = mutually_exclusive; - status = devm_extcon_dev_register(&pdev->dev, &palmas_usb->edev); + status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { dev_err(&pdev->dev, "failed to register extcon device\n"); - kfree(palmas_usb->edev.name); + kfree(palmas_usb->edev->name); return status; } @@ -209,7 +214,7 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->id_irq, status); - kfree(palmas_usb->edev.name); + kfree(palmas_usb->edev->name); return status; } } @@ -224,7 +229,7 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->vbus_irq, status); - kfree(palmas_usb->edev.name); + kfree(palmas_usb->edev->name); return status; } } @@ -238,7 +243,7 @@ static int palmas_usb_remove(struct platform_device *pdev) { struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); - kfree(palmas_usb->edev.name); + kfree(palmas_usb->edev->name); return 0; } diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 9974e387e483..b8f87b704409 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -415,7 +415,7 @@ struct palmas_usb { struct palmas *palmas; struct device *dev; - struct extcon_dev edev; + struct extcon_dev *edev; int id_otg_irq; int id_irq; -- cgit v1.2.3 From 8ad357551797b1edc184fb9f6a4f80a6fa626459 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 14 Mar 2014 11:00:21 +0100 Subject: KVM: s390: enable IBS for single running VCPUs This patch enables the IBS facility when a single VCPU is running. The facility is dynamically turned on/off as soon as other VCPUs enter/leave the stopped state. When this facility is operating, some instructions can be executed faster for single-cpu guests. 
Signed-off-by: David Hildenbrand Reviewed-by: Dominik Dingel Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 + arch/s390/kvm/kvm-s390.c | 123 ++++++++++++++++++++++++++++++++++++++- arch/s390/kvm/trace-s390.h | 22 +++++++ include/linux/kvm_host.h | 2 + 4 files changed, 147 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 0d45f6fe734f..f0a1dc5e5d1f 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -72,6 +72,7 @@ struct sca_block { #define CPUSTAT_ZARCH 0x00000800 #define CPUSTAT_MCDS 0x00000100 #define CPUSTAT_SM 0x00000080 +#define CPUSTAT_IBS 0x00000040 #define CPUSTAT_G 0x00000008 #define CPUSTAT_GED 0x00000004 #define CPUSTAT_J 0x00000002 @@ -411,6 +412,7 @@ struct kvm_arch{ int use_cmma; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; + spinlock_t start_stop_lock; }; #define KVM_HVA_ERR_BAD (-1UL) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6c972d229ace..0a01744cbdd9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -458,6 +458,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.css_support = 0; kvm->arch.use_irqchip = 0; + spin_lock_init(&kvm->arch.start_stop_lock); + return 0; out_nogmap: debug_unregister(kvm->arch.dbf); @@ -996,8 +998,15 @@ bool kvm_s390_cmma_enabled(struct kvm *kvm) return true; } +static bool ibs_enabled(struct kvm_vcpu *vcpu) +{ + return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; +} + static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) { +retry: + s390_vcpu_unblock(vcpu); /* * We use MMU_RELOAD just to re-arm the ipte notifier for the * guest prefix page. gmap_ipte_notify will wait on the ptl lock. @@ -1005,15 +1014,34 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) * already finished. We might race against a second unmapper that * wants to set the blocking bit. Lets just retry the request loop. 
*/ - while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { + if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { int rc; rc = gmap_ipte_notify(vcpu->arch.gmap, vcpu->arch.sie_block->prefix, PAGE_SIZE * 2); if (rc) return rc; - s390_vcpu_unblock(vcpu); + goto retry; + } + + if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { + if (!ibs_enabled(vcpu)) { + trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); + atomic_set_mask(CPUSTAT_IBS, + &vcpu->arch.sie_block->cpuflags); + } + goto retry; } + + if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { + if (ibs_enabled(vcpu)) { + trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); + atomic_clear_mask(CPUSTAT_IBS, + &vcpu->arch.sie_block->cpuflags); + } + goto retry; + } + return 0; } @@ -1362,16 +1390,107 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return kvm_s390_store_status_unloaded(vcpu, addr); } +static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) +{ + return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; +} + +static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) +{ + kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); + kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); + exit_sie_sync(vcpu); +} + +static void __disable_ibs_on_all_vcpus(struct kvm *kvm) +{ + unsigned int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + __disable_ibs_on_vcpu(vcpu); + } +} + +static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) +{ + kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); + kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); + exit_sie_sync(vcpu); +} + void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) { + int i, online_vcpus, started_vcpus = 0; + + if (!is_vcpu_stopped(vcpu)) + return; + trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); + /* Only one cpu at a time may enter/leave the STOPPED state. */ + spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); + online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); + + for (i = 0; i < online_vcpus; i++) { + if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) + started_vcpus++; + } + + if (started_vcpus == 0) { + /* we're the only active VCPU -> speed it up */ + __enable_ibs_on_vcpu(vcpu); + } else if (started_vcpus == 1) { + /* + * As we are starting a second VCPU, we have to disable + * the IBS facility on all VCPUs to remove potentially + * oustanding ENABLE requests. + */ + __disable_ibs_on_all_vcpus(vcpu->kvm); + } + atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + /* + * Another VCPU might have used IBS while we were offline. + * Let's play safe and flush the VCPU at startup. + */ + vcpu->arch.sie_block->ihcpu = 0xffff; + spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); + return; } void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) { + int i, online_vcpus, started_vcpus = 0; + struct kvm_vcpu *started_vcpu = NULL; + + if (is_vcpu_stopped(vcpu)) + return; + trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); + /* Only one cpu at a time may enter/leave the STOPPED state. */ + spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); + online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); + atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + __disable_ibs_on_vcpu(vcpu); + + for (i = 0; i < online_vcpus; i++) { + if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { + started_vcpus++; + started_vcpu = vcpu->kvm->vcpus[i]; + } + } + + if (started_vcpus == 1) { + /* + * As we only have one VCPU left, we want to enable the + * IBS facility for that VCPU to speed it up. 
+ */ + __enable_ibs_on_vcpu(started_vcpu); + } + + spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); + return; } static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 34d4f8af3a1d..647e9d6a4818 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -244,6 +244,28 @@ TRACE_EVENT(kvm_s390_enable_css, __entry->kvm) ); +/* + * Trace point for enabling and disabling interlocking-and-broadcasting + * suppression. + */ +TRACE_EVENT(kvm_s390_enable_disable_ibs, + TP_PROTO(unsigned int id, int state), + TP_ARGS(id, state), + + TP_STRUCT__entry( + __field(unsigned int, id) + __field(int, state) + ), + + TP_fast_assign( + __entry->id = id; + __entry->state = state; + ), + + TP_printk("%s ibs on cpu %d", + __entry->state ? "enabling" : "disabling", __entry->id) + ); + #endif /* _TRACE_KVMS390_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 820fc2e1d9df..1e125b055327 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -134,6 +134,8 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_EPR_EXIT 20 #define KVM_REQ_SCAN_IOAPIC 21 #define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 +#define KVM_REQ_ENABLE_IBS 23 +#define KVM_REQ_DISABLE_IBS 24 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 -- cgit v1.2.3 From a235c0916543d8b886405f8871dc644124c7cf78 Mon Sep 17 00:00:00 2001 From: Iulia Manda Date: Wed, 12 Mar 2014 18:37:24 +0200 Subject: rcu: Remove "extern" from function declaration in include/linux/rcupdate.h Because functions have the extern storage class specifier by default, this keyword can be removed. It is redundant to use it explicitly. Signed-off-by: Iulia Manda Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 00a7fd61b3c6..fdc422f3d61d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -328,7 +328,7 @@ extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; -extern int debug_lockdep_rcu_enabled(void); +int debug_lockdep_rcu_enabled(void); /** * rcu_read_lock_held() - might we be in RCU read-side critical section? -- cgit v1.2.3 From 71a9b26963f8c2d0df6f782e2b29ccefc22d4fba Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 31 Mar 2014 13:13:02 -0700 Subject: rcu: Document RCU_INIT_POINTER()'s lack of ordering guarantees Although rcu_assign_pointer() provides ordering guarantees, RCU_INIT_POINTER() does not. This commit makes that explicit in the docbook comment header. Reported-by: Lai Jiangshan Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index fdc422f3d61d..3c5ef02ea580 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -949,6 +949,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * pointers, but you must use rcu_assign_pointer() to initialize the * external-to-structure pointer -after- you have completely initialized * the reader-accessible portions of the linked structure. 
+ * + * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no + * ordering guarantees for either the CPU or the compiler. */ #define RCU_INIT_POINTER(p, v) \ do { \ -- cgit v1.2.3 From 3046365bb470f0ec2f7cf5cb07a8ee7e4b490103 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 29 Apr 2014 00:51:00 +0100 Subject: devres: introduce API "devm_kmemdup Introduce devm_kmemdup, which uses resource managed kmalloc. There are several request from maintainers to add this instead of using kmemdup. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- Documentation/driver-model/devres.txt | 1 + drivers/base/devres.c | 21 +++++++++++++++++++++ include/linux/device.h | 2 ++ 3 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 4f7897e99cba..499951873997 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -236,6 +236,7 @@ certainly invest a bit more effort into libata core layer). MEM devm_kzalloc() devm_kfree() + devm_kmemdup() IIO devm_iio_device_alloc() diff --git a/drivers/base/devres.c b/drivers/base/devres.c index db4e264eecb6..d0914cba2413 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -831,3 +831,24 @@ void devm_kfree(struct device *dev, void *p) WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_kfree); + +/** + * devm_kmemdup - Resource-managed kmemdup + * @dev: Device this memory belongs to + * @src: Memory region to duplicate + * @len: Memory region length + * @gfp: GFP mask to use + * + * Duplicate region of a memory using resource managed kmalloc + */ +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) +{ + void *p; + + p = devm_kmalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kmemdup); diff --git a/include/linux/device.h b/include/linux/device.h index d1d1c055b48e..ab871588da89 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -623,6 +623,8 @@ static inline void *devm_kcalloc(struct device *dev, } extern void devm_kfree(struct device *dev, void *p); extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); void __iomem *devm_request_and_ioremap(struct device *dev, -- cgit v1.2.3 From 9fbfb4b37ed23f71aa9484484266381c6c6964cb Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 29 Apr 2014 00:51:00 +0100 Subject: IIO: core: Introduce read_raw_multi This callback is introduced to overcome some limitations of existing read_raw callback. The functionality of both existing read_raw and read_raw_multi is similar, both are used to request values from the device. The current read_raw callback allows only two return values. The new read_raw_multi allows returning multiple values. Instead of passing just address of val and val2, it passes length and pointer to values. Depending on the type and length of passed buffer, iio client drivers can return multiple values. 
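As an illustration, a minimal driver-side callback using the new interface might look like the sketch below (the driver, its state structure and the cached samples are hypothetical; the callback signature and IIO_VAL_INT_MULTIPLE are what this patch introduces):

	/* Hypothetical driver: report four cached raw samples at once. */
	static int foo_read_raw_multi(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan,
				      int max_len, int *vals, int *val_len,
				      long mask)
	{
		struct foo_state *st = iio_priv(indio_dev);
		int i;

		switch (mask) {
		case IIO_CHAN_INFO_RAW:
			if (max_len < 4)
				return -EINVAL;
			for (i = 0; i < 4; i++)
				vals[i] = st->samples[i];
			*val_len = 4;
			return IIO_VAL_INT_MULTIPLE;
		default:
			return -EINVAL;
		}
	}

The core then formats all *val_len entries via iio_format_value(), while drivers that only implement read_raw keep working unchanged.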
Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- drivers/iio/iio_core.h | 2 +- drivers/iio/industrialio-core.c | 65 ++++++++++++++++++++++++++-------------- drivers/iio/industrialio-event.c | 6 ++-- drivers/iio/inkern.c | 16 ++++++++-- include/linux/iio/iio.h | 17 +++++++++++ include/linux/iio/types.h | 1 + 6 files changed, 80 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h index f6db6af36ba6..5f0ea77fe717 100644 --- a/drivers/iio/iio_core.h +++ b/drivers/iio/iio_core.h @@ -35,7 +35,7 @@ int __iio_add_chan_devattr(const char *postfix, struct list_head *attr_list); void iio_free_chan_devattr_list(struct list_head *attr_list); -ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2); +ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); /* Event interface flags */ #define IIO_BUSY_BIT_POS 1 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 184444db62ac..59540859bfae 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -373,41 +373,53 @@ EXPORT_SYMBOL_GPL(iio_enum_write); * @buf: The buffer to which the formated value gets written * @type: One of the IIO_VAL_... constants. This decides how the val and val2 * parameters are formatted. - * @val: First part of the value, exact meaning depends on the type parameter. - * @val2: Second part of the value, exact meaning depends on the type parameter. + * @vals: pointer to the values, exact meaning depends on the type parameter. */ -ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2) +ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) { unsigned long long tmp; bool scale_db = false; switch (type) { case IIO_VAL_INT: - return sprintf(buf, "%d\n", val); + return sprintf(buf, "%d\n", vals[0]); case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; case IIO_VAL_INT_PLUS_MICRO: - if (val2 < 0) - return sprintf(buf, "-%ld.%06u%s\n", abs(val), -val2, + if (vals[1] < 0) + return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]), + -vals[1], scale_db ? " dB" : ""); else - return sprintf(buf, "%d.%06u%s\n", val, val2, + return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1], scale_db ? 
" dB" : ""); case IIO_VAL_INT_PLUS_NANO: - if (val2 < 0) - return sprintf(buf, "-%ld.%09u\n", abs(val), -val2); + if (vals[1] < 0) + return sprintf(buf, "-%ld.%09u\n", abs(vals[0]), + -vals[1]); else - return sprintf(buf, "%d.%09u\n", val, val2); + return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); case IIO_VAL_FRACTIONAL: - tmp = div_s64((s64)val * 1000000000LL, val2); - val2 = do_div(tmp, 1000000000LL); - val = tmp; - return sprintf(buf, "%d.%09u\n", val, val2); + tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); + vals[1] = do_div(tmp, 1000000000LL); + vals[0] = tmp; + return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); case IIO_VAL_FRACTIONAL_LOG2: - tmp = (s64)val * 1000000000LL >> val2; - val2 = do_div(tmp, 1000000000LL); - val = tmp; - return sprintf(buf, "%d.%09u\n", val, val2); + tmp = (s64)vals[0] * 1000000000LL >> vals[1]; + vals[1] = do_div(tmp, 1000000000LL); + vals[0] = tmp; + return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); + case IIO_VAL_INT_MULTIPLE: + { + int i; + int len = 0; + + for (i = 0; i < size; ++i) + len += snprintf(&buf[len], PAGE_SIZE - len, "%d ", + vals[i]); + len += snprintf(&buf[len], PAGE_SIZE - len, "\n"); + return len; + } default: return 0; } @@ -419,14 +431,23 @@ static ssize_t iio_read_channel_info(struct device *dev, { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); - int val, val2; - int ret = indio_dev->info->read_raw(indio_dev, this_attr->c, - &val, &val2, this_attr->address); + int vals[INDIO_MAX_RAW_ELEMENTS]; + int ret; + int val_len = 2; + + if (indio_dev->info->read_raw_multi) + ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, + INDIO_MAX_RAW_ELEMENTS, + vals, &val_len, + this_attr->address); + else + ret = indio_dev->info->read_raw(indio_dev, this_attr->c, + &vals[0], &vals[1], this_attr->address); if (ret < 0) return ret; - return iio_format_value(buf, ret, val, val2); + return iio_format_value(buf, ret, val_len, vals); } /** diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index dddfb0f90d34..258a973a1fb8 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -270,7 +270,7 @@ static ssize_t iio_ev_value_show(struct device *dev, { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); - int val, val2; + int val, val2, val_arr[2]; int ret; ret = indio_dev->info->read_event_value(indio_dev, @@ -279,7 +279,9 @@ static ssize_t iio_ev_value_show(struct device *dev, &val, &val2); if (ret < 0) return ret; - return iio_format_value(buf, ret, val, val2); + val_arr[0] = val; + val_arr[1] = val2; + return iio_format_value(buf, ret, 2, val_arr); } static ssize_t iio_ev_value_store(struct device *dev, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index adeba5a0ecf7..d833d55052ea 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -417,12 +417,24 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, enum iio_chan_info_enum info) { int unused; + int vals[INDIO_MAX_RAW_ELEMENTS]; + int ret; + int val_len = 2; if (val2 == NULL) val2 = &unused; - return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel, - val, val2, info); + if (chan->indio_dev->info->read_raw_multi) { + ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, + chan->channel, INDIO_MAX_RAW_ELEMENTS, + vals, &val_len, info); + *val = vals[0]; + *val2 = vals[1]; + } else + ret = chan->indio_dev->info->read_raw(chan->indio_dev, + 
chan->channel, val, val2, info); + + return ret; } int iio_read_channel_raw(struct iio_channel *chan, int *val) diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 5f2d00e7e488..5629c92eeadf 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -288,6 +288,8 @@ static inline s64 iio_get_time_ns(void) #define INDIO_ALL_BUFFER_MODES \ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE) +#define INDIO_MAX_RAW_ELEMENTS 4 + struct iio_trigger; /* forward declaration */ struct iio_dev; @@ -302,6 +304,14 @@ struct iio_dev; * the channel in question. Return value will specify the * type of value returned by the device. val and val2 will * contain the elements making up the returned value. + * @read_raw_multi: function to return values from the device. + * mask specifies which value. Note 0 means a reading of + * the channel in question. Return value will specify the + * type of value returned by the device. vals pointer + * contain the elements making up the returned value. + * max_len specifies maximum number of elements + * vals pointer can contain. val_len is used to return + * length of valid elements in vals. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. * @write_raw_get_fmt: callback function to query the expected @@ -328,6 +338,13 @@ struct iio_info { int *val2, long mask); + int (*read_raw_multi)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int max_len, + int *vals, + int *val_len, + long mask); + int (*write_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 084d882fe01b..a13c2241abce 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -79,6 +79,7 @@ enum iio_event_direction { #define IIO_VAL_INT_PLUS_MICRO 2 #define IIO_VAL_INT_PLUS_NANO 3 #define IIO_VAL_INT_PLUS_MICRO_DB 4 +#define IIO_VAL_INT_MULTIPLE 5 #define IIO_VAL_FRACTIONAL 10 #define IIO_VAL_FRACTIONAL_LOG2 11 -- cgit v1.2.3 From 0ee8546ac01864b6e12e65199142e00db59c9809 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 29 Apr 2014 00:51:00 +0100 Subject: IIO: core: Modify scan element type The current scan element type uses the following format: [be|le]:[s|u]bits/storagebits[>>shift]. To specify multiple elements in this type, added a repeat value. So new format is: [be|le]:[s|u]bits/storagebitsXr[>>shift]. Here r is specifying how may times, real/storage bits are repeating. When X is value is 0 or 1, then repeat value is not used in the format, and it will be same as existing format. 
Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 41 +++++++++++++++++++++++++++++++++------ include/linux/iio/iio.h | 7 +++++++ 2 files changed, 42 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index e472cff6eeae..36b1ae92e239 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -150,7 +150,16 @@ static ssize_t iio_show_fixed_type(struct device *dev, type = IIO_BE; #endif } - return sprintf(buf, "%s:%c%d/%d>>%u\n", + if (this_attr->c->scan_type.repeat > 1) + return sprintf(buf, "%s:%c%d/%dX%d>>%u\n", + iio_endian_prefix[type], + this_attr->c->scan_type.sign, + this_attr->c->scan_type.realbits, + this_attr->c->scan_type.storagebits, + this_attr->c->scan_type.repeat, + this_attr->c->scan_type.shift); + else + return sprintf(buf, "%s:%c%d/%d>>%u\n", iio_endian_prefix[type], this_attr->c->scan_type.sign, this_attr->c->scan_type.realbits, @@ -475,14 +484,22 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, for_each_set_bit(i, mask, indio_dev->masklength) { ch = iio_find_channel_from_si(indio_dev, i); - length = ch->scan_type.storagebits / 8; + if (ch->scan_type.repeat > 1) + length = ch->scan_type.storagebits / 8 * + ch->scan_type.repeat; + else + length = ch->scan_type.storagebits / 8; bytes = ALIGN(bytes, length); bytes += length; } if (timestamp) { ch = iio_find_channel_from_si(indio_dev, indio_dev->scan_index_timestamp); - length = ch->scan_type.storagebits / 8; + if (ch->scan_type.repeat > 1) + length = ch->scan_type.storagebits / 8 * + ch->scan_type.repeat; + else + length = ch->scan_type.storagebits / 8; bytes = ALIGN(bytes, length); bytes += length; } @@ -959,7 +976,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, indio_dev->masklength, in_ind + 1); ch = iio_find_channel_from_si(indio_dev, in_ind); - length = ch->scan_type.storagebits/8; + if (ch->scan_type.repeat > 1) + length = ch->scan_type.storagebits / 8 * + ch->scan_type.repeat; + else + length = ch->scan_type.storagebits / 8; /* Make sure we are aligned */ in_loc += length; if (in_loc % length) @@ -971,7 +992,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, goto error_clear_mux_table; } ch = iio_find_channel_from_si(indio_dev, in_ind); - length = ch->scan_type.storagebits/8; + if (ch->scan_type.repeat > 1) + length = ch->scan_type.storagebits / 8 * + ch->scan_type.repeat; + else + length = ch->scan_type.storagebits / 8; if (out_loc % length) out_loc += length - out_loc % length; if (in_loc % length) @@ -992,7 +1017,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, } ch = iio_find_channel_from_si(indio_dev, indio_dev->scan_index_timestamp); - length = ch->scan_type.storagebits/8; + if (ch->scan_type.repeat > 1) + length = ch->scan_type.storagebits / 8 * + ch->scan_type.repeat; + else + length = ch->scan_type.storagebits / 8; if (out_loc % length) out_loc += length - out_loc % length; if (in_loc % length) diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 5629c92eeadf..ccde91725f98 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -177,6 +177,12 @@ struct iio_event_spec { * shift: Shift right by this before masking out * realbits. * endianness: little or big endian + * repeat: Number of times real/storage bits + * repeats. When the repeat element is + * more than 1, then the type element in + * sysfs will show a repeat value. 
+ * Otherwise, the number of repetitions is + * omitted. * @info_mask_separate: What information is to be exported that is specific to * this channel. * @info_mask_shared_by_type: What information is to be exported that is shared @@ -219,6 +225,7 @@ struct iio_chan_spec { u8 realbits; u8 storagebits; u8 shift; + u8 repeat; enum iio_endian endianness; } scan_type; long info_mask_separate; -- cgit v1.2.3 From 5082f405b74ad1b69aa9595555ce55b75b59b2ec Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 29 Apr 2014 00:51:00 +0100 Subject: IIO: core: Add quaternion modifier Added quaternion in the list of supported modifiers. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 1 + include/linux/iio/types.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 59540859bfae..de8b1c2ed4b4 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -84,6 +84,7 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_LIGHT_RED] = "red", [IIO_MOD_LIGHT_GREEN] = "green", [IIO_MOD_LIGHT_BLUE] = "blue", + [IIO_MOD_QUATERNION] = "quaternion", }; /* relies on pairs of these shared then separate */ diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index a13c2241abce..4fdab2e843b4 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -53,6 +53,7 @@ enum iio_modifier { IIO_MOD_LIGHT_RED, IIO_MOD_LIGHT_GREEN, IIO_MOD_LIGHT_BLUE, + IIO_MOD_QUATERNION, }; enum iio_event_type { -- cgit v1.2.3 From fc18dddc0625cd1fdf6a823e85138ff05848a85f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 29 Apr 2014 00:51:00 +0100 Subject: iio: hid-sensors: Added device rotation support Added usage id processing for device rotation. This uses IIO interfaces for triggered buffer to present data to user mode.This uses HID sensor framework for registering callback events from the sensor hub. Data is exported to user space in the form of quaternion rotation format. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- drivers/iio/orientation/Kconfig | 12 + drivers/iio/orientation/Makefile | 1 + drivers/iio/orientation/hid-sensor-rotation.c | 348 ++++++++++++++++++++++++++ include/linux/hid-sensor-ids.h | 1 + 4 files changed, 362 insertions(+) create mode 100644 drivers/iio/orientation/hid-sensor-rotation.c (limited to 'include/linux') diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig index 58c62c837e12..e3aa1e58d920 100644 --- a/drivers/iio/orientation/Kconfig +++ b/drivers/iio/orientation/Kconfig @@ -16,4 +16,16 @@ config HID_SENSOR_INCLINOMETER_3D Say yes here to build support for the HID SENSOR Inclinometer 3D. +config HID_SENSOR_DEVICE_ROTATION + depends on HID_SENSOR_HUB + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Device Rotation" + help + Say yes here to build support for the HID SENSOR + device rotation. The output of a device rotation sensor + is presented using quaternion format. 
+ endmenu diff --git a/drivers/iio/orientation/Makefile b/drivers/iio/orientation/Makefile index 2c97572ee919..4734dabbde13 100644 --- a/drivers/iio/orientation/Makefile +++ b/drivers/iio/orientation/Makefile @@ -4,3 +4,4 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_HID_SENSOR_INCLINOMETER_3D) += hid-sensor-incl-3d.o +obj-$(CONFIG_HID_SENSOR_DEVICE_ROTATION) += hid-sensor-rotation.o diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c new file mode 100644 index 000000000000..51387bbc1ce1 --- /dev/null +++ b/drivers/iio/orientation/hid-sensor-rotation.c @@ -0,0 +1,348 @@ +/* + * HID Sensors Driver + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../common/hid-sensors/hid-sensor-trigger.h" + +struct dev_rot_state { + struct hid_sensor_hub_callbacks callbacks; + struct hid_sensor_common common_attributes; + struct hid_sensor_hub_attribute_info quaternion; + u32 sampled_vals[4]; +}; + +/* Channel definitions */ +static const struct iio_chan_spec dev_rot_channels[] = { + { + .type = IIO_ROT, + .modified = 1, + .channel2 = IIO_MOD_QUATERNION, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_HYSTERESIS) + } +}; + +/* Adjust channel real bits based on report descriptor */ +static void dev_rot_adjust_channel_bit_mask(struct iio_chan_spec *chan, + int size) +{ + chan->scan_type.sign = 's'; + /* Real storage bits will change based on the report desc. 
*/ + chan->scan_type.realbits = size * 8; + /* Maximum size of a sample to capture is u32 */ + chan->scan_type.storagebits = sizeof(u32) * 8; + chan->scan_type.repeat = 4; +} + +/* Channel read_raw handler */ +static int dev_rot_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int size, int *vals, int *val_len, + long mask) +{ + struct dev_rot_state *rot_state = iio_priv(indio_dev); + int ret_type; + int i; + + vals[0] = 0; + vals[1] = 0; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (size >= 4) { + for (i = 0; i < 4; ++i) + vals[i] = rot_state->sampled_vals[i]; + ret_type = IIO_VAL_INT_MULTIPLE; + *val_len = 4; + } else + ret_type = -EINVAL; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret_type = hid_sensor_read_samp_freq_value( + &rot_state->common_attributes, &vals[0], &vals[1]); + break; + case IIO_CHAN_INFO_HYSTERESIS: + ret_type = hid_sensor_read_raw_hyst_value( + &rot_state->common_attributes, &vals[0], &vals[1]); + break; + default: + ret_type = -EINVAL; + break; + } + + return ret_type; +} + +/* Channel write_raw handler */ +static int dev_rot_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) +{ + struct dev_rot_state *rot_state = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + ret = hid_sensor_write_samp_freq_value( + &rot_state->common_attributes, val, val2); + break; + case IIO_CHAN_INFO_HYSTERESIS: + ret = hid_sensor_write_raw_hyst_value( + &rot_state->common_attributes, val, val2); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static const struct iio_info dev_rot_info = { + .driver_module = THIS_MODULE, + .read_raw_multi = &dev_rot_read_raw, + .write_raw = &dev_rot_write_raw, +}; + +/* Function to push data to buffer */ +static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len) +{ + dev_dbg(&indio_dev->dev, "hid_sensor_push_data >>\n"); + iio_push_to_buffers(indio_dev, (u8 *)data); + dev_dbg(&indio_dev->dev, "hid_sensor_push_data <<\n"); + +} + +/* Callback handler to send event after all samples are received and captured */ +static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev, + unsigned usage_id, + void *priv) +{ + struct iio_dev *indio_dev = platform_get_drvdata(priv); + struct dev_rot_state *rot_state = iio_priv(indio_dev); + + dev_dbg(&indio_dev->dev, "dev_rot_proc_event [%d]\n", + rot_state->common_attributes.data_ready); + + if (rot_state->common_attributes.data_ready) + hid_sensor_push_data(indio_dev, + (u8 *)rot_state->sampled_vals, + sizeof(rot_state->sampled_vals)); + + return 0; +} + +/* Capture samples in local storage */ +static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev, + unsigned usage_id, + size_t raw_len, char *raw_data, + void *priv) +{ + struct iio_dev *indio_dev = platform_get_drvdata(priv); + struct dev_rot_state *rot_state = iio_priv(indio_dev); + + if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) { + memcpy(rot_state->sampled_vals, raw_data, + sizeof(rot_state->sampled_vals)); + dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len, + sizeof(rot_state->sampled_vals)); + } + + return 0; +} + +/* Parse report which is specific to an usage id*/ +static int dev_rot_parse_report(struct platform_device *pdev, + struct hid_sensor_hub_device *hsdev, + struct iio_chan_spec *channels, + unsigned usage_id, + struct dev_rot_state *st) +{ + int ret; + + ret = sensor_hub_input_get_attribute_info(hsdev, + HID_INPUT_REPORT, + usage_id, + 
HID_USAGE_SENSOR_ORIENT_QUATERNION, + &st->quaternion); + if (ret) + return ret; + + dev_rot_adjust_channel_bit_mask(&channels[0], + st->quaternion.size / 4); + + dev_dbg(&pdev->dev, "dev_rot %x:%x\n", st->quaternion.index, + st->quaternion.report_id); + + dev_dbg(&pdev->dev, "dev_rot: attrib size %d\n", + st->quaternion.size); + + /* Set Sensitivity field ids, when there is no individual modifier */ + if (st->common_attributes.sensitivity.index < 0) { + sensor_hub_input_get_attribute_info(hsdev, + HID_FEATURE_REPORT, usage_id, + HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS | + HID_USAGE_SENSOR_DATA_ORIENTATION, + &st->common_attributes.sensitivity); + dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n", + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } + + return 0; +} + +/* Function to initialize the processing for usage id */ +static int hid_dev_rot_probe(struct platform_device *pdev) +{ + int ret; + static char *name = "dev_rotation"; + struct iio_dev *indio_dev; + struct dev_rot_state *rot_state; + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_chan_spec *channels; + + indio_dev = devm_iio_device_alloc(&pdev->dev, + sizeof(struct dev_rot_state)); + if (indio_dev == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, indio_dev); + + rot_state = iio_priv(indio_dev); + rot_state->common_attributes.hsdev = hsdev; + rot_state->common_attributes.pdev = pdev; + + ret = hid_sensor_parse_common_attributes(hsdev, + HID_USAGE_SENSOR_DEVICE_ORIENTATION, + &rot_state->common_attributes); + if (ret) { + dev_err(&pdev->dev, "failed to setup common attributes\n"); + return ret; + } + + channels = devm_kmemdup(&pdev->dev, dev_rot_channels, + sizeof(dev_rot_channels), GFP_KERNEL); + if (!channels) { + dev_err(&pdev->dev, "failed to duplicate channels\n"); + return -ENOMEM; + } + + ret = dev_rot_parse_report(pdev, hsdev, channels, + HID_USAGE_SENSOR_DEVICE_ORIENTATION, rot_state); + if (ret) { + dev_err(&pdev->dev, "failed to setup attributes\n"); + return ret; + } + + indio_dev->channels = channels; + indio_dev->num_channels = ARRAY_SIZE(dev_rot_channels); + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &dev_rot_info; + indio_dev->name = name; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + NULL, NULL); + if (ret) { + dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); + return ret; + } + rot_state->common_attributes.data_ready = false; + ret = hid_sensor_setup_trigger(indio_dev, name, + &rot_state->common_attributes); + if (ret) { + dev_err(&pdev->dev, "trigger setup failed\n"); + goto error_unreg_buffer_funcs; + } + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&pdev->dev, "device register failed\n"); + goto error_remove_trigger; + } + + rot_state->callbacks.send_event = dev_rot_proc_event; + rot_state->callbacks.capture_sample = dev_rot_capture_sample; + rot_state->callbacks.pdev = pdev; + ret = sensor_hub_register_callback(hsdev, + HID_USAGE_SENSOR_DEVICE_ORIENTATION, + &rot_state->callbacks); + if (ret) { + dev_err(&pdev->dev, "callback reg failed\n"); + goto error_iio_unreg; + } + + return 0; + +error_iio_unreg: + iio_device_unregister(indio_dev); +error_remove_trigger: + hid_sensor_remove_trigger(&rot_state->common_attributes); +error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); + return ret; +} + +/* Function to deinitialize the processing for usage id */ +static int 
hid_dev_rot_remove(struct platform_device *pdev) +{ + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct dev_rot_state *rot_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_DEVICE_ORIENTATION); + iio_device_unregister(indio_dev); + hid_sensor_remove_trigger(&rot_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + + return 0; +} + +static struct platform_device_id hid_dev_rot_ids[] = { + { + /* Format: HID-SENSOR-usage_id_in_hex_lowercase */ + .name = "HID-SENSOR-20008a", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, hid_dev_rot_ids); + +static struct platform_driver hid_dev_rot_platform_driver = { + .id_table = hid_dev_rot_ids, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = hid_dev_rot_probe, + .remove = hid_dev_rot_remove, +}; +module_platform_driver(hid_dev_rot_platform_driver); + +MODULE_DESCRIPTION("HID Sensor Device Rotation"); +MODULE_AUTHOR("Srinivas Pandruvada "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 14ead9e8eda8..109f0e633e01 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -76,6 +76,7 @@ #define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480 #define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481 +#define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A #define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482 #define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484 -- cgit v1.2.3 From 27e289dce29764e488c1e13e9aa6950cad1f4aab Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Fri, 25 Apr 2014 23:15:23 +0300 Subject: cpufreq: Introduce macros for cpufreq_frequency_table iteration Many cpufreq drivers need to iterate over the cpufreq_frequency_table for various tasks. This patch introduces two macros which can be used for iteration over cpufreq_frequency_table keeping a common coding style across drivers: - cpufreq_for_each_entry: iterate over each entry of the table - cpufreq_for_each_valid_entry: iterate over each entry that contains a valid frequency. It should have no functional changes. Signed-off-by: Stratos Karafotis Acked-by: Lad, Prabhakar Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- Documentation/cpu-freq/cpu-drivers.txt | 19 +++++++++++++++++++ drivers/cpufreq/cpufreq.c | 11 +++++++++++ include/linux/cpufreq.h | 21 +++++++++++++++++++++ 3 files changed, 51 insertions(+) (limited to 'include/linux') diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt index 48da5fdcb9f1..b045fe54986a 100644 --- a/Documentation/cpu-freq/cpu-drivers.txt +++ b/Documentation/cpu-freq/cpu-drivers.txt @@ -228,3 +228,22 @@ is the corresponding frequency table helper for the ->target stage. Just pass the values to this function, and the unsigned int index returns the number of the frequency table entry which contains the frequency the CPU shall be set to. + +The following macros can be used as iterators over cpufreq_frequency_table: + +cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency +table. + +cpufreq-for_each_valid_entry(pos, table) - iterates over all entries, +excluding CPUFREQ_ENTRY_INVALID frequencies. +Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and +"table" - the cpufreq_frequency_table * you want to iterate over. 
+ +For example: + + struct cpufreq_frequency_table *pos, *driver_freq_table; + + cpufreq_for_each_entry(pos, driver_freq_table) { + /* Do something with pos */ + pos->frequency = ... + } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index abda6609d3e7..a517da996aaf 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -237,6 +237,17 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); +bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) +{ + while ((*pos)->frequency != CPUFREQ_TABLE_END) + if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID) + return true; + else + (*pos)++; + return false; +} +EXPORT_SYMBOL_GPL(cpufreq_next_valid); + /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 5ae5100c1f24..77a5fa191502 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -468,6 +468,27 @@ struct cpufreq_frequency_table { * order */ }; +bool cpufreq_next_valid(struct cpufreq_frequency_table **pos); + +/* + * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + */ + +#define cpufreq_for_each_entry(pos, table) \ + for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) + +/* + * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table + * excluding CPUFREQ_ENTRY_INVALID frequencies. + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + */ + +#define cpufreq_for_each_valid_entry(pos, table) \ + for (pos = table; cpufreq_next_valid(&pos); pos++) + int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); -- cgit v1.2.3 From cdae05a0f0f7d15837dfd6f4200e8caea03c9cbf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 28 Apr 2014 10:49:43 +0000 Subject: dmaengine: edma: Make reading the position of active channels work As Joel pointed out, edma_read_position() uses memcpy_fromio() to read the parameter ram. That's not synchronized with the internal update as it does a byte by byte copy. We need to do a 32bit read to get a consistent value. Further reading destination and source is pointless. In DEV_TO_MEM transfers we are only interested in the destination, in MEM_TO_DEV we care about the source. In MEM_TO_MEM it really does not matter which one you read. Simple solution: Remove the pointers, select dest/source via a bool and return the read value. Remove the export of this function while at it. The only potential user is the dmaengine and that's always builtin. 
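A sketch of the expected calling convention after this change (the wrapper below is hypothetical; edma_get_position() and its bool selector are what the patch introduces):

	#include <linux/dmaengine.h>
	#include <linux/platform_data/edma.h>

	/* Hypothetical helper: read the endpoint that actually moves. */
	static dma_addr_t example_current_position(unsigned slot,
						   enum dma_transfer_direction dir)
	{
		/* DEV_TO_MEM: watch the destination; MEM_TO_DEV: watch the source. */
		return edma_get_position(slot, dir == DMA_DEV_TO_MEM);
	}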
Signed-off-by: Thomas Gleixner Acked-by: Sekhar Nori Signed-off-by: Joel Fernandes Signed-off-by: Vinod Koul --- arch/arm/common/edma.c | 24 +++++++++--------------- include/linux/platform_data/edma.h | 2 +- 2 files changed, 10 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index 0b37f7734d0f..25fa735abc6c 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c @@ -994,29 +994,23 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port, EXPORT_SYMBOL(edma_set_dest); /** - * edma_get_position - returns the current transfer points + * edma_get_position - returns the current transfer point * @slot: parameter RAM slot being examined - * @src: pointer to source port position - * @dst: pointer to destination port position + * @dst: true selects the dest position, false the source * - * Returns current source and destination addresses for a particular - * parameter RAM slot. Its channel should not be active when this is called. + * Returns the position of the current active slot */ -void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) +dma_addr_t edma_get_position(unsigned slot, bool dst) { - struct edmacc_param temp; - unsigned ctlr; + u32 offs, ctlr = EDMA_CTLR(slot); - ctlr = EDMA_CTLR(slot); slot = EDMA_CHAN_SLOT(slot); - edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp); - if (src != NULL) - *src = temp.src; - if (dst != NULL) - *dst = temp.dst; + offs = PARM_OFFSET(slot); + offs += dst ? PARM_DST : PARM_SRC; + + return edma_read(ctlr, offs); } -EXPORT_SYMBOL(edma_get_position); /** * edma_set_src_index - configure DMA source address indexing diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 923f8a3e4ce0..12f134b1493c 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -130,7 +130,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port, enum address_mode mode, enum fifo_width); void edma_set_dest(unsigned slot, dma_addr_t dest_port, enum address_mode mode, enum fifo_width); -void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst); +dma_addr_t edma_get_position(unsigned slot, bool dst); void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, -- cgit v1.2.3 From ccf3356e6b3d2802ea452c0091314605a9e7b7a0 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 3 Apr 2014 13:38:32 -0500 Subject: of/fdt: consolidate built-in dtb section variables Unify the various architectures __dtb_start and __dtb_end definitions moving them into of_fdt.h. 
Signed-off-by: Rob Herring Acked-by: Vineet Gupta Acked-by: James Hogan Tested-by: Michal Simek Cc: Ralf Baechle Cc: Jonas Bonn Cc: Chris Zankel Cc: Max Filippov Cc: linux-metag@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux@lists.openrisc.net Cc: linux-xtensa@linux-xtensa.org Tested-by: Grant Likely Tested-by: Stephen Chivers --- arch/arc/include/asm/sections.h | 1 - arch/metag/kernel/setup.c | 4 ---- arch/mips/include/asm/mips-boards/generic.h | 2 -- arch/mips/lantiq/prom.h | 2 -- arch/mips/netlogic/xlp/dt.c | 2 +- arch/mips/ralink/of.c | 2 -- arch/openrisc/kernel/vmlinux.h | 2 -- arch/xtensa/kernel/setup.c | 1 - include/linux/of_fdt.h | 3 +++ 9 files changed, 4 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h index 764f1e3ba752..09db952e14bd 100644 --- a/arch/arc/include/asm/sections.h +++ b/arch/arc/include/asm/sections.h @@ -12,6 +12,5 @@ #include extern char __arc_dccm_base[]; -extern char __dtb_start[]; #endif diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 129c7cdda1ce..31cf53d0eba2 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -105,10 +105,6 @@ extern char _heap_start[]; -#ifdef CONFIG_METAG_BUILTIN_DTB -extern u32 __dtb_start[]; -#endif - #ifdef CONFIG_DA_CONSOLE /* Our early channel based console driver */ extern struct console dash_console; diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index b969491aa98d..c904c24550f6 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -67,8 +67,6 @@ extern int mips_revision_sconid; -extern char __dtb_start[]; - #ifdef CONFIG_PCI extern void mips_pcibios_init(void); #else diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h index 69a4c582338d..bfd2d58c1d69 100644 --- a/arch/mips/lantiq/prom.h +++ b/arch/mips/lantiq/prom.h @@ -26,6 +26,4 @@ struct ltq_soc_info { extern void ltq_soc_detect(struct ltq_soc_info *i); extern void ltq_soc_init(void); -extern char __dtb_start[]; - #endif diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c index 7f9615a712fb..bdde33147bce 100644 --- a/arch/mips/netlogic/xlp/dt.c +++ b/arch/mips/netlogic/xlp/dt.c @@ -42,7 +42,7 @@ #include extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], - __dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[], __dtb_start[]; + __dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[]; static void *xlp_fdt_blob; void __init *xlp_dt_init(void *fdtp) diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 91d7060d5aea..251395210e23 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -28,8 +28,6 @@ __iomem void *rt_sysc_membase; __iomem void *rt_memc_membase; -extern char __dtb_start[]; - __iomem void *plat_of_remap_node(const char *node) { struct resource res; diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h index 70b9ce41835c..bbcdf21b0b35 100644 --- a/arch/openrisc/kernel/vmlinux.h +++ b/arch/openrisc/kernel/vmlinux.h @@ -5,6 +5,4 @@ extern char __initrd_start, __initrd_end; #endif -extern u32 __dtb_start[]; - #endif diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 84fe931bb60e..89986e55d594 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -73,7 +73,6 @@ extern int initrd_below_start_ok; #endif #ifdef CONFIG_OF -extern u32 __dtb_start[]; void *dtb_start = __dtb_start; #endif diff --git a/include/linux/of_fdt.h 
b/include/linux/of_fdt.h index ddd7219af8ac..d4d0efe534b9 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -80,6 +80,9 @@ extern int __initdata dt_root_addr_cells; extern int __initdata dt_root_size_cells; extern struct boot_param_header *initial_boot_params; +extern char __dtb_start[]; +extern char __dtb_end[]; + /* For scanning the flat device-tree at boot time */ extern char *find_flat_dt_string(u32 offset); extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, -- cgit v1.2.3 From bba04d965d06abbbe10afd3687742389107e198e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Sat, 29 Mar 2014 14:14:17 -0500 Subject: of/fdt: remove unused of_scan_flat_dt_by_path of_scan_flat_dt_by_path is unused anywhere in the kernel, so remove it. Signed-off-by: Rob Herring Tested-by: Michal Simek Tested-by: Grant Likely Tested-by: Stephen Chivers --- drivers/of/fdt.c | 67 -------------------------------------------------- include/linux/of_fdt.h | 3 --- 2 files changed, 70 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 63bdcee473fa..9c8535291909 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -671,73 +671,6 @@ struct fdt_scan_status { void *data; }; -/** - * fdt_scan_node_by_path - iterator for of_scan_flat_dt_by_path function - */ -static int __init fdt_scan_node_by_path(unsigned long node, const char *uname, - int depth, void *data) -{ - struct fdt_scan_status *st = data; - - /* - * if scan at the requested fdt node has been completed, - * return -ENXIO to abort further scanning - */ - if (depth <= st->depth) - return -ENXIO; - - /* requested fdt node has been found, so call iterator function */ - if (st->found) - return st->iterator(node, uname, depth, st->data); - - /* check if scanning automata is entering next level of fdt nodes */ - if (depth == st->depth + 1 && - strncmp(st->name, uname, st->namelen) == 0 && - uname[st->namelen] == 0) { - st->depth += 1; - if (st->name[st->namelen] == 0) { - st->found = 1; - } else { - const char *next = st->name + st->namelen + 1; - st->name = next; - st->namelen = strcspn(next, "/"); - } - return 0; - } - - /* scan next fdt node */ - return 0; -} - -/** - * of_scan_flat_dt_by_path - scan flattened tree blob and call callback on each - * child of the given path. - * @path: path to start searching for children - * @it: callback function - * @data: context data pointer - * - * This function is used to scan the flattened device-tree starting from the - * node given by path. It is used to extract information (like reserved - * memory), which is required on ealy boot before we can unflatten the tree. 
- */ -int __init of_scan_flat_dt_by_path(const char *path, - int (*it)(unsigned long node, const char *name, int depth, void *data), - void *data) -{ - struct fdt_scan_status st = {path, 0, -1, 0, it, data}; - int ret = 0; - - if (initial_boot_params) - ret = of_scan_flat_dt(fdt_scan_node_by_path, &st); - - if (!st.found) - return -ENOENT; - else if (ret == -ENXIO) /* scan has been completed */ - return 0; - else - return ret; -} - const char * __init of_flat_dt_get_machine_name(void) { const char *name; diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index d4d0efe534b9..991ec74b4e11 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -93,9 +93,6 @@ extern void *of_get_flat_dt_prop(unsigned long node, const char *name, extern int of_flat_dt_is_compatible(unsigned long node, const char *name); extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); -extern int of_scan_flat_dt_by_path(const char *path, - int (*it)(unsigned long node, const char *name, int depth, void *data), - void *data); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data); -- cgit v1.2.3 From 9d0c4dfedd96ee54fc075b16d02f82499c8cc3a6 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 1 Apr 2014 23:49:03 -0500 Subject: of/fdt: update of_get_flat_dt_prop in prep for libfdt Make of_get_flat_dt_prop arguments compatible with libfdt fdt_getprop call in preparation to convert FDT code to use libfdt. Make the return value const and the property length ptr type an int. Signed-off-by: Rob Herring Tested-by: Michal Simek Tested-by: Grant Likely Tested-by: Stephen Chivers --- arch/arc/kernel/devtree.c | 2 +- arch/arm/kernel/devtree.c | 2 +- arch/arm/mach-exynos/exynos.c | 2 +- arch/arm/mach-vexpress/platsmp.c | 2 +- arch/arm/plat-samsung/s5p-dev-mfc.c | 4 ++-- arch/microblaze/kernel/prom.c | 8 +++---- arch/powerpc/kernel/epapr_paravirt.c | 2 +- arch/powerpc/kernel/fadump.c | 4 ++-- arch/powerpc/kernel/prom.c | 24 +++++++++++---------- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/mm/hash_utils_64.c | 22 +++++++++---------- arch/powerpc/platforms/52xx/efika.c | 4 ++-- arch/powerpc/platforms/chrp/setup.c | 4 ++-- arch/powerpc/platforms/powernv/opal.c | 12 +++++------ arch/powerpc/platforms/pseries/setup.c | 4 ++-- arch/xtensa/kernel/setup.c | 2 +- drivers/of/fdt.c | 39 +++++++++++++++++----------------- drivers/of/of_reserved_mem.c | 4 ++-- include/linux/of_fdt.h | 8 +++---- 19 files changed, 77 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c index b6dc4e21fd32..0b3ef4025d89 100644 --- a/arch/arc/kernel/devtree.c +++ b/arch/arc/kernel/devtree.c @@ -42,7 +42,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt) const struct machine_desc *mdesc; unsigned long dt_root; void *clk; - unsigned long len; + int len; if (!early_init_dt_scan(dt)) return NULL; diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index dff9cc0e9bd6..38f4711b4995 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -247,7 +247,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) if (!mdesc) { const char *prop; - long size; + int size; unsigned long dt_root; early_print("\nError: unrecognized/unsupported " diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index b32a907d021d..77293d39dfc9 100644 --- a/arch/arm/mach-exynos/exynos.c +++ 
b/arch/arm/mach-exynos/exynos.c @@ -250,7 +250,7 @@ static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname, { struct map_desc iodesc; __be32 *reg; - unsigned long len; + int len; if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid") && !of_flat_dt_is_compatible(node, "samsung,exynos5440-clock")) diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c index 993c9ae5dc5e..b4a5f0d8390d 100644 --- a/arch/arm/mach-vexpress/platsmp.c +++ b/arch/arm/mach-vexpress/platsmp.c @@ -53,7 +53,7 @@ static int __init vexpress_dt_find_scu(unsigned long node, { if (of_flat_dt_match(node, vexpress_dt_cortex_a9_match)) { phys_addr_t phys_addr; - __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL); + const __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL); if (WARN_ON(!reg)) return -EINVAL; diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c index 98087b655df0..469b86260fe3 100644 --- a/arch/arm/plat-samsung/s5p-dev-mfc.c +++ b/arch/arm/plat-samsung/s5p-dev-mfc.c @@ -125,8 +125,8 @@ device_initcall(s5p_mfc_memory_init); int __init s5p_fdt_alloc_mfc_mem(unsigned long node, const char *uname, int depth, void *data) { - __be32 *prop; - unsigned long len; + const __be32 *prop; + int len; struct s5p_mfc_dt_meminfo mfc_mem; if (!data) diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index abdfb10e7eca..c76630603058 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -43,13 +43,13 @@ #include #ifdef CONFIG_EARLY_PRINTK -static char *stdout; +static const char *stdout; static int __init early_init_dt_scan_chosen_serial(unsigned long node, const char *uname, int depth, void *data) { - unsigned long l; - char *p; + int l; + const char *p; pr_debug("%s: depth: %d, uname: %s\n", __func__, depth, uname); @@ -80,7 +80,7 @@ static int __init early_init_dt_scan_chosen_serial(unsigned long node, (strncmp(p, "xlnx,opb-uartlite", 17) == 0) || (strncmp(p, "xlnx,axi-uartlite", 17) == 0) || (strncmp(p, "xlnx,mdm", 8) == 0)) { - unsigned int *addrp; + const unsigned int *addrp; *(u32 *)data = UARTLITE; diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 7898be90f2dc..d64e92b22dd8 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -36,7 +36,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node, int depth, void *data) { const u32 *insts; - unsigned long len; + int len; int i; insts = of_get_flat_dt_prop(node, "hcall-instructions", &len); diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 2230fd0ca3e4..7213d930918d 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -55,9 +55,9 @@ int crash_mem_ranges; int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) { - __be32 *sections; + const __be32 *sections; int i, num_sections; - unsigned long size; + int size; const int *token; if (depth != 1 || strcmp(uname, "rtas") != 0) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d65754935652..483273e5c3e0 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -163,7 +163,7 @@ static struct ibm_pa_feature { {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, }; -static void __init scan_features(unsigned long node, unsigned char *ftrs, +static void __init scan_features(unsigned long node, const unsigned char *ftrs, unsigned long tablelen, struct 
ibm_pa_feature *fp, unsigned long ft_size) @@ -202,8 +202,8 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs, static void __init check_cpu_pa_features(unsigned long node) { - unsigned char *pa_ftrs; - unsigned long tablelen; + const unsigned char *pa_ftrs; + int tablelen; pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); if (pa_ftrs == NULL) @@ -216,7 +216,7 @@ static void __init check_cpu_pa_features(unsigned long node) #ifdef CONFIG_PPC_STD_MMU_64 static void __init check_cpu_slb_size(unsigned long node) { - __be32 *slb_size_ptr; + const __be32 *slb_size_ptr; slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL); if (slb_size_ptr != NULL) { @@ -257,7 +257,7 @@ static struct feature_property { static inline void identical_pvr_fixup(unsigned long node) { unsigned int pvr; - char *model = of_get_flat_dt_prop(node, "model", NULL); + const char *model = of_get_flat_dt_prop(node, "model", NULL); /* * Since 440GR(x)/440EP(x) processors have the same pvr, @@ -295,11 +295,11 @@ static int __init early_init_dt_scan_cpus(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *prop; const __be32 *intserv; int i, nthreads; - unsigned long len; + int len; int found = -1; int found_thread = 0; @@ -392,7 +392,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, int depth, void *data) { - unsigned long *lprop; /* All these set by kernel, so no need to convert endian */ + const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */ /* Use common scan routine to determine if this is the chosen node */ if (early_init_dt_scan_chosen(node, uname, depth, data) == 0) @@ -443,8 +443,9 @@ int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, */ static int __init early_init_dt_scan_drconf_memory(unsigned long node) { - __be32 *dm, *ls, *usm; - unsigned long l, n, flags; + const __be32 *dm, *ls, *usm; + int l; + unsigned long n, flags; u64 base, size, memblock_size; unsigned int is_kexec_kdump = 0, rngs; @@ -564,7 +565,8 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) static void __init early_reserve_mem_dt(void) { - unsigned long i, len, dt_root; + unsigned long i, dt_root; + int len; const __be32 *prop; early_init_fdt_scan_reserved_mem(); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8cd5ed049b5d..8b4c857c1421 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1142,7 +1142,7 @@ void __init rtas_initialize(void) int __init early_init_dt_scan_rtas(unsigned long node, const char *uname, int depth, void *data) { - u32 *basep, *entryp, *sizep; + const u32 *basep, *entryp, *sizep; if (depth != 1 || strcmp(uname, "rtas") != 0) return 0; diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index d766d6ee33fe..59cc19a23a7a 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -265,9 +265,9 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - __be32 *prop; - unsigned long size = 0; + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *prop; + int size = 0; /* We are scanning "cpu" nodes only */ if (type 
== NULL || strcmp(type, "cpu") != 0) @@ -320,9 +320,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - __be32 *prop; - unsigned long size = 0; + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *prop; + int size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) @@ -402,9 +402,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, static int __init htab_dt_scan_hugepage_blocks(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - __be64 *addr_prop; - __be32 *page_count_prop; + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be64 *addr_prop; + const __be32 *page_count_prop; unsigned int expected_pages; long unsigned int phys_addr; long unsigned int block_size; @@ -546,8 +546,8 @@ static int __init htab_dt_scan_pftsize(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - __be32 *prop; + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *prop; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 18c104820198..6e19b0ad5d26 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -199,8 +199,8 @@ static void __init efika_setup_arch(void) static int __init efika_probe(void) { - char *model = of_get_flat_dt_prop(of_get_flat_dt_root(), - "model", NULL); + const char *model = of_get_flat_dt_prop(of_get_flat_dt_root(), + "model", NULL); if (model == NULL) return 0; diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index c665d7de6c99..7044fd36197b 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -574,8 +574,8 @@ chrp_init2(void) static int __init chrp_probe(void) { - char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(), - "device_type", NULL); + const char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(), + "device_type", NULL); if (dtype == NULL) return 0; if (strcmp(dtype, "chrp")) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 49d2f00019e5..c1329846bfa3 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -61,7 +61,7 @@ int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) { const void *basep, *entryp, *sizep; - unsigned long basesz, entrysz, runtimesz; + int basesz, entrysz, runtimesz; if (depth != 1 || strcmp(uname, "ibm,opal") != 0) return 0; @@ -77,11 +77,11 @@ int __init early_init_dt_scan_opal(unsigned long node, opal.entry = of_read_number(entryp, entrysz/4); opal.size = of_read_number(sizep, runtimesz/4); - pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n", + pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n", opal.base, basep, basesz); - pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n", + pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n", opal.entry, entryp, entrysz); - pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%ld)\n", + pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", opal.size, sizep, runtimesz); powerpc_firmware_features |= FW_FEATURE_OPAL; @@ -102,7 +102,7 
@@ int __init early_init_dt_scan_opal(unsigned long node, int __init early_init_dt_scan_recoverable_ranges(unsigned long node, const char *uname, int depth, void *data) { - unsigned long i, psize, size; + int i, psize, size; const __be32 *prop; if (depth != 1 || strcmp(uname, "ibm,opal") != 0) @@ -359,7 +359,7 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count) if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) return 0; len = cpu_to_be64(count); - rc = opal_console_read(vtermno, &len, buf); + rc = opal_console_read(vtermno, &len, buf); if (rc == OPAL_SUCCESS) return be64_to_cpu(len); return 0; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 2db8cc691bf4..099d2df976a2 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -665,7 +665,7 @@ static int __init pseries_probe_fw_features(unsigned long node, void *data) { const char *prop; - unsigned long len; + int len; static int hypertas_found; static int vec5_found; @@ -698,7 +698,7 @@ static int __init pseries_probe_fw_features(unsigned long node, static int __init pSeries_probe(void) { unsigned long root = of_get_flat_dt_root(); - char *dtype = of_get_flat_dt_prop(root, "device_type", NULL); + const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL); if (dtype == NULL) return 0; diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 89986e55d594..1991a3d0b2f8 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -220,7 +220,7 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, int depth, void *data) { const __be32 *ranges; - unsigned long len; + int len; if (depth > 1) return 0; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9c8535291909..1d1582bb81fb 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -35,7 +35,7 @@ char *of_fdt_get_string(struct boot_param_header *blob, u32 offset) */ void *of_fdt_get_property(struct boot_param_header *blob, unsigned long node, const char *name, - unsigned long *size) + int *size) { unsigned long p = node; @@ -85,7 +85,8 @@ int of_fdt_is_compatible(struct boot_param_header *blob, unsigned long node, const char *compat) { const char *cp; - unsigned long cplen, l, score = 0; + int cplen; + unsigned long l, score = 0; cp = of_fdt_get_property(blob, node, "compatible", &cplen); if (cp == NULL) @@ -444,8 +445,8 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, { int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); phys_addr_t base, size; - unsigned long len; - __be32 *prop; + int len; + const __be32 *prop; int nomap, first = 1; prop = of_get_flat_dt_prop(node, "reg", &len); @@ -488,7 +489,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, */ static int __init __reserved_mem_check_root(unsigned long node) { - __be32 *prop; + const __be32 *prop; prop = of_get_flat_dt_prop(node, "#size-cells", NULL); if (!prop || be32_to_cpup(prop) != dt_root_size_cells) @@ -638,8 +639,8 @@ unsigned long __init of_get_flat_dt_root(void) * This function can be used within scan_flattened_dt callback to get * access to properties */ -void *__init of_get_flat_dt_prop(unsigned long node, const char *name, - unsigned long *size) +const void *__init of_get_flat_dt_prop(unsigned long node, const char *name, + int *size) { return of_fdt_get_property(initial_boot_params, node, name, size); } @@ -710,7 +711,7 @@ const void * __init of_flat_dt_match_machine(const void *default_match, } if 
(!best_data) { const char *prop; - long size; + int size; pr_err("\n unrecognized device tree list:\n[ "); @@ -739,8 +740,8 @@ const void * __init of_flat_dt_match_machine(const void *default_match, static void __init early_init_dt_check_for_initrd(unsigned long node) { u64 start, end; - unsigned long len; - __be32 *prop; + int len; + const __be32 *prop; pr_debug("Looking for initrd properties... "); @@ -773,7 +774,7 @@ static inline void early_init_dt_check_for_initrd(unsigned long node) int __init early_init_dt_scan_root(unsigned long node, const char *uname, int depth, void *data) { - __be32 *prop; + const __be32 *prop; if (depth != 0) return 0; @@ -795,9 +796,9 @@ int __init early_init_dt_scan_root(unsigned long node, const char *uname, return 1; } -u64 __init dt_mem_next_cell(int s, __be32 **cellp) +u64 __init dt_mem_next_cell(int s, const __be32 **cellp) { - __be32 *p = *cellp; + const __be32 *p = *cellp; *cellp = p + s; return of_read_number(p, s); @@ -809,9 +810,9 @@ u64 __init dt_mem_next_cell(int s, __be32 **cellp) int __init early_init_dt_scan_memory(unsigned long node, const char *uname, int depth, void *data) { - char *type = of_get_flat_dt_prop(node, "device_type", NULL); - __be32 *reg, *endp; - unsigned long l; + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *reg, *endp; + int l; /* We are scanning "memory" nodes only */ if (type == NULL) { @@ -832,7 +833,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, endp = reg + (l / sizeof(__be32)); - pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", + pr_debug("memory scan node %s, reg size %d, data: %x %x %x %x,\n", uname, l, reg[0], reg[1], reg[2], reg[3]); while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { @@ -855,8 +856,8 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data) { - unsigned long l; - char *p; + int l; + const char *p; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index daaaf935911d..e420eb52e5c9 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -95,8 +95,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node, int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); phys_addr_t start = 0, end = 0; phys_addr_t base = 0, align = 0, size; - unsigned long len; - __be32 *prop; + int len; + const __be32 *prop; int nomap; int ret; diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 991ec74b4e11..b36a50d6af37 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -66,7 +66,7 @@ extern char *of_fdt_get_string(struct boot_param_header *blob, u32 offset); extern void *of_fdt_get_property(struct boot_param_header *blob, unsigned long node, const char *name, - unsigned long *size); + int *size); extern int of_fdt_is_compatible(struct boot_param_header *blob, unsigned long node, const char *compat); @@ -88,8 +88,8 @@ extern char *find_flat_dt_string(u32 offset); extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); -extern void *of_get_flat_dt_prop(unsigned long node, const char *name, - unsigned long *size); +extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, + int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); 
extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); @@ -103,7 +103,7 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool no_map); extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); -extern u64 dt_mem_next_cell(int s, __be32 **cellp); +extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ extern int early_init_dt_scan_root(unsigned long node, const char *uname, -- cgit v1.2.3 From e6a6928c3ea1d0195ed75a091e345696b916c09b Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 2 Apr 2014 15:10:14 -0500 Subject: of/fdt: Convert FDT functions to use libfdt The kernel FDT functions predate libfdt and are much more limited in functionality. Also, the kernel functions and libfdt functions are not compatible with each other because they have different definitions of node offsets. To avoid this incompatibility and in preparation to add more FDT parsing functions which will need libfdt, let's first convert the existing code to use libfdt. The FDT unflattening, top-level FDT scanning, and property retrieval functions are converted to use libfdt. The scanning code should be re-worked to be more efficient and understandable by using libfdt to find nodes directly by path or compatible strings. Signed-off-by: Rob Herring Tested-by: Michal Simek Tested-by: Grant Likely Tested-by: Stephen Chivers --- drivers/of/Kconfig | 1 + drivers/of/Makefile | 2 + drivers/of/fdt.c | 209 ++++++++++++++----------------------------------- include/linux/of_fdt.h | 1 - 4 files changed, 60 insertions(+), 153 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 889005fa4d04..2dcb0541012d 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -20,6 +20,7 @@ config OF_SELFTEST config OF_FLATTREE bool select DTC + select LIBFDT config OF_EARLY_FLATTREE bool diff --git a/drivers/of/Makefile b/drivers/of/Makefile index ed9660adad77..9891232f999e 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_OF_PCI) += of_pci.o obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o obj-$(CONFIG_OF_MTD) += of_mtd.o obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o + +CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 1d1582bb81fb..8e820a2b106d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -19,58 +19,11 @@ #include #include #include +#include #include /* for COMMAND_LINE_SIZE */ #include -char *of_fdt_get_string(struct boot_param_header *blob, u32 offset) -{ - return ((char *)blob) + - be32_to_cpu(blob->off_dt_strings) + offset; -} - -/** - * of_fdt_get_property - Given a node in the given flat blob, return - * the property ptr - */ -void *of_fdt_get_property(struct boot_param_header *blob, - unsigned long node, const char *name, - int *size) -{ - unsigned long p = node; - - do { - u32 tag = be32_to_cpup((__be32 *)p); - u32 sz, noff; - const char *nstr; - - p += 4; - if (tag == OF_DT_NOP) - continue; - if (tag != OF_DT_PROP) - return NULL; - - sz = be32_to_cpup((__be32 *)p); - noff = be32_to_cpup((__be32 *)(p + 4)); - p += 8; - if (be32_to_cpu(blob->version) < 0x10) - p = ALIGN(p, sz >= 8 ? 
8 : 4); - - nstr = of_fdt_get_string(blob, noff); - if (nstr == NULL) { - pr_warning("Can't find property index name !\n"); - return NULL; - } - if (strcmp(name, nstr) == 0) { - if (size) - *size = sz; - return (void *)p; - } - p += sz; - p = ALIGN(p, 4); - } while (1); -} - /** * of_fdt_is_compatible - Return true if given node from the given blob has * compat in its compatible list @@ -88,7 +41,7 @@ int of_fdt_is_compatible(struct boot_param_header *blob, int cplen; unsigned long l, score = 0; - cp = of_fdt_get_property(blob, node, "compatible", &cplen); + cp = fdt_getprop(blob, node, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { @@ -147,28 +100,27 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, */ static void * unflatten_dt_node(struct boot_param_header *blob, void *mem, - void **p, + int *poffset, struct device_node *dad, struct device_node ***allnextpp, unsigned long fpsize) { + const __be32 *p; struct device_node *np; struct property *pp, **prev_pp = NULL; - char *pathp; - u32 tag; + const char *pathp; unsigned int l, allocl; + static int depth = 0; + int old_depth; + int offset; int has_name = 0; int new_format = 0; - tag = be32_to_cpup(*p); - if (tag != OF_DT_BEGIN_NODE) { - pr_err("Weird tag at start of node: %x\n", tag); + pathp = fdt_get_name(blob, *poffset, &l); + if (!pathp) return mem; - } - *p += 4; - pathp = *p; - l = allocl = strlen(pathp) + 1; - *p = PTR_ALIGN(*p + l, 4); + + allocl = l++; /* version 0x10 has a more compact unit name here instead of the full * path. we accumulate the full path size using "fpsize", we'll rebuild @@ -186,7 +138,7 @@ static void * unflatten_dt_node(struct boot_param_header *blob, fpsize = 1; allocl = 2; l = 1; - *pathp = '\0'; + pathp = ""; } else { /* account for '/' and path size minus terminal 0 * already in 'l' @@ -233,32 +185,23 @@ static void * unflatten_dt_node(struct boot_param_header *blob, } } /* process properties */ - while (1) { - u32 sz, noff; - char *pname; - - tag = be32_to_cpup(*p); - if (tag == OF_DT_NOP) { - *p += 4; - continue; - } - if (tag != OF_DT_PROP) + for (offset = fdt_first_property_offset(blob, *poffset); + (offset >= 0); + (offset = fdt_next_property_offset(blob, offset))) { + const char *pname; + u32 sz; + + if (!(p = fdt_getprop_by_offset(blob, offset, &pname, &sz))) { + offset = -FDT_ERR_INTERNAL; break; - *p += 4; - sz = be32_to_cpup(*p); - noff = be32_to_cpup(*p + 4); - *p += 8; - if (be32_to_cpu(blob->version) < 0x10) - *p = PTR_ALIGN(*p, sz >= 8 ? 
8 : 4); - - pname = of_fdt_get_string(blob, noff); + } + if (pname == NULL) { pr_info("Can't find property name in list !\n"); break; } if (strcmp(pname, "name") == 0) has_name = 1; - l = strlen(pname) + 1; pp = unflatten_dt_alloc(&mem, sizeof(struct property), __alignof__(struct property)); if (allnextpp) { @@ -270,26 +213,25 @@ static void * unflatten_dt_node(struct boot_param_header *blob, if ((strcmp(pname, "phandle") == 0) || (strcmp(pname, "linux,phandle") == 0)) { if (np->phandle == 0) - np->phandle = be32_to_cpup((__be32*)*p); + np->phandle = be32_to_cpup(p); } /* And we process the "ibm,phandle" property * used in pSeries dynamic device tree * stuff */ if (strcmp(pname, "ibm,phandle") == 0) - np->phandle = be32_to_cpup((__be32 *)*p); - pp->name = pname; + np->phandle = be32_to_cpup(p); + pp->name = (char *)pname; pp->length = sz; - pp->value = *p; + pp->value = (__be32 *)p; *prev_pp = pp; prev_pp = &pp->next; } - *p = PTR_ALIGN((*p) + sz, 4); } /* with version 0x10 we may not have the name property, recreate * it here from the unit name if absent */ if (!has_name) { - char *p1 = pathp, *ps = pathp, *pa = NULL; + const char *p1 = pathp, *ps = pathp, *pa = NULL; int sz; while (*p1) { @@ -326,19 +268,18 @@ static void * unflatten_dt_node(struct boot_param_header *blob, if (!np->type) np->type = ""; } - while (tag == OF_DT_BEGIN_NODE || tag == OF_DT_NOP) { - if (tag == OF_DT_NOP) - *p += 4; - else - mem = unflatten_dt_node(blob, mem, p, np, allnextpp, - fpsize); - tag = be32_to_cpup(*p); - } - if (tag != OF_DT_END_NODE) { - pr_err("Weird tag at end of node: %x\n", tag); - return mem; - } - *p += 4; + + old_depth = depth; + *poffset = fdt_next_node(blob, *poffset, &depth); + if (depth < 0) + depth = 0; + while (*poffset > 0 && depth > old_depth) + mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp, + fpsize); + + if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND) + pr_err("unflatten: error %d processing FDT\n", *poffset); + return mem; } @@ -359,7 +300,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, void * (*dt_alloc)(u64 size, u64 align)) { unsigned long size; - void *start, *mem; + int start; + void *mem; struct device_node **allnextp = mynodes; pr_debug(" -> unflatten_device_tree()\n"); @@ -380,7 +322,7 @@ static void __unflatten_device_tree(struct boot_param_header *blob, } /* First pass, scan for size */ - start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct); + start = 0; size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0); size = ALIGN(size, 4); @@ -395,10 +337,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, pr_debug(" unflattening %p...\n", mem); /* Second pass, do actual unflattening */ - start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct); + start = 0; unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); - if (be32_to_cpup(start) != OF_DT_END) - pr_warning("Weird tag at end of tree: %08x\n", be32_to_cpup(start)); if (be32_to_cpup(mem + size) != 0xdeadbeef) pr_warning("End of tree marker overwritten: %08x\n", be32_to_cpup(mem + size)); @@ -574,47 +514,19 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, void *data), void *data) { - unsigned long p = ((unsigned long)initial_boot_params) + - be32_to_cpu(initial_boot_params->off_dt_struct); - int rc = 0; - int depth = -1; - - do { - u32 tag = be32_to_cpup((__be32 *)p); - const char *pathp; - - p += 4; - if (tag == OF_DT_END_NODE) { - depth--; - continue; - } - if (tag == OF_DT_NOP) - continue; - if (tag == OF_DT_END) - break; - 
if (tag == OF_DT_PROP) { - u32 sz = be32_to_cpup((__be32 *)p); - p += 8; - if (be32_to_cpu(initial_boot_params->version) < 0x10) - p = ALIGN(p, sz >= 8 ? 8 : 4); - p += sz; - p = ALIGN(p, 4); - continue; - } - if (tag != OF_DT_BEGIN_NODE) { - pr_err("Invalid tag %x in flat device tree!\n", tag); - return -EINVAL; - } - depth++; - pathp = (char *)p; - p = ALIGN(p + strlen(pathp) + 1, 4); + const void *blob = initial_boot_params; + const char *pathp; + int offset, rc = 0, depth = -1; + + for (offset = fdt_next_node(blob, -1, &depth); + offset >= 0 && depth >= 0 && !rc; + offset = fdt_next_node(blob, offset, &depth)) { + + pathp = fdt_get_name(blob, offset, NULL); if (*pathp == '/') pathp = kbasename(pathp); - rc = it(p, pathp, depth, data); - if (rc != 0) - break; - } while (1); - + rc = it(offset, pathp, depth, data); + } return rc; } @@ -623,14 +535,7 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, */ unsigned long __init of_get_flat_dt_root(void) { - unsigned long p = ((unsigned long)initial_boot_params) + - be32_to_cpu(initial_boot_params->off_dt_struct); - - while (be32_to_cpup((__be32 *)p) == OF_DT_NOP) - p += 4; - BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE); - p += 4; - return ALIGN(p + strlen((char *)p) + 1, 4); + return 0; } /** @@ -642,7 +547,7 @@ unsigned long __init of_get_flat_dt_root(void) const void *__init of_get_flat_dt_prop(unsigned long node, const char *name, int *size) { - return of_fdt_get_property(initial_boot_params, node, name, size); + return fdt_getprop(initial_boot_params, node, name, size); } /** diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index b36a50d6af37..26cef9ac55c5 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -84,7 +84,6 @@ extern char __dtb_start[]; extern char __dtb_end[]; /* For scanning the flat device-tree at boot time */ -extern char *find_flat_dt_string(u32 offset); extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); -- cgit v1.2.3 From c972de14971f1482ab482f0a7abc85679a23326a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 1 Apr 2014 22:48:01 -0500 Subject: of/fdt: use libfdt accessors for header data With libfdt support, we can take advantage of helper accessors in libfdt for accessing the FDT header data. This makes the code more readable and makes the FDT blob structure more opaque to the kernel. This also prepares for removing struct boot_param_header completely. Signed-off-by: Rob Herring Cc: Max Filippov Tested-by: Michal Simek Tested-by: Grant Likely Tested-by: Stephen Chivers --- drivers/of/fdt.c | 26 ++++++++++++-------------- include/linux/of_fdt.h | 8 ++++---- 2 files changed, 16 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 8e820a2b106d..0b38a6aa8603 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -34,7 +34,7 @@ * On match, returns a non-zero value with smaller values returned for more * specific compatible values. 
*/ -int of_fdt_is_compatible(struct boot_param_header *blob, +int of_fdt_is_compatible(const void *blob, unsigned long node, const char *compat) { const char *cp; @@ -59,7 +59,7 @@ int of_fdt_is_compatible(struct boot_param_header *blob, /** * of_fdt_match - Return true if node matches a list of compatible values */ -int of_fdt_match(struct boot_param_header *blob, unsigned long node, +int of_fdt_match(const void *blob, unsigned long node, const char *const *compat) { unsigned int tmp, score = 0; @@ -98,7 +98,7 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, * @allnextpp: pointer to ->allnext from last allocated device_node * @fpsize: Size of the node path up at the current depth. */ -static void * unflatten_dt_node(struct boot_param_header *blob, +static void * unflatten_dt_node(void *blob, void *mem, int *poffset, struct device_node *dad, @@ -295,7 +295,7 @@ static void * unflatten_dt_node(struct boot_param_header *blob, * @dt_alloc: An allocator that provides a virtual address to memory * for the resulting tree */ -static void __unflatten_device_tree(struct boot_param_header *blob, +static void __unflatten_device_tree(void *blob, struct device_node **mynodes, void * (*dt_alloc)(u64 size, u64 align)) { @@ -312,11 +312,11 @@ static void __unflatten_device_tree(struct boot_param_header *blob, } pr_debug("Unflattening device tree:\n"); - pr_debug("magic: %08x\n", be32_to_cpu(blob->magic)); - pr_debug("size: %08x\n", be32_to_cpu(blob->totalsize)); - pr_debug("version: %08x\n", be32_to_cpu(blob->version)); + pr_debug("magic: %08x\n", fdt_magic(blob)); + pr_debug("size: %08x\n", fdt_totalsize(blob)); + pr_debug("version: %08x\n", fdt_version(blob)); - if (be32_to_cpu(blob->magic) != OF_DT_HEADER) { + if (fdt_check_header(blob)) { pr_err("Invalid device tree blob header\n"); return; } @@ -363,9 +363,7 @@ static void *kernel_tree_alloc(u64 size, u64 align) void of_fdt_unflatten_tree(unsigned long *blob, struct device_node **mynodes) { - struct boot_param_header *device_tree = - (struct boot_param_header *)blob; - __unflatten_device_tree(device_tree, mynodes, &kernel_tree_alloc); + __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc); } EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); @@ -852,7 +850,7 @@ bool __init early_init_dt_scan(void *params) initial_boot_params = params; /* check device tree validity */ - if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) { + if (fdt_check_header(params)) { initial_boot_params = NULL; return false; } @@ -907,9 +905,9 @@ void __init unflatten_and_copy_device_tree(void) return; } - size = __be32_to_cpu(initial_boot_params->totalsize); + size = fdt_totalsize(initial_boot_params); dt = early_init_dt_alloc_memory_arch(size, - __alignof__(struct boot_param_header)); + roundup_pow_of_two(FDT_V17_SIZE)); if (dt) { memcpy(dt, initial_boot_params, size); diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 26cef9ac55c5..348dae2c8a3c 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -62,15 +62,15 @@ struct boot_param_header { struct device_node; /* For scanning an arbitrary device-tree at any time */ -extern char *of_fdt_get_string(struct boot_param_header *blob, u32 offset); -extern void *of_fdt_get_property(struct boot_param_header *blob, +extern char *of_fdt_get_string(const void *blob, u32 offset); +extern void *of_fdt_get_property(const void *blob, unsigned long node, const char *name, int *size); -extern int of_fdt_is_compatible(struct boot_param_header *blob, +extern int of_fdt_is_compatible(const 
void *blob, unsigned long node, const char *compat); -extern int of_fdt_match(struct boot_param_header *blob, unsigned long node, +extern int of_fdt_match(const void *blob, unsigned long node, const char *const *compat); extern void of_fdt_unflatten_tree(unsigned long *blob, struct device_node **mynodes); -- cgit v1.2.3 From c0556d3f2c3f42eaed049139ce6f0899ecdb0217 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 22 Apr 2014 12:55:10 -0500 Subject: of/fdt: introduce of_get_flat_dt_size Add a wrapper function to retrieve the FDT size from the FDT header. This is primarily to avoid libfdt include paths for the whole kernel. Signed-off-by: Rob Herring Tested-by: Grant Likely Tested-by: Stephen Chivers --- drivers/of/fdt.c | 8 ++++++++ include/linux/of_fdt.h | 1 + 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index d9e64504cda0..358bcf0500d2 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -553,6 +553,14 @@ unsigned long __init of_get_flat_dt_root(void) return 0; } +/** + * of_get_flat_dt_size - Return the total size of the FDT + */ +int __init of_get_flat_dt_size(void) +{ + return fdt_totalsize(initial_boot_params); +} + /** * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr * diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 348dae2c8a3c..e10099c95999 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -92,6 +92,7 @@ extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, extern int of_flat_dt_is_compatible(unsigned long node, const char *name); extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); +extern int of_get_flat_dt_size(void); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data); -- cgit v1.2.3 From 1daa0c4ced334f18f458aba6ace7e01e8cdc2ecf Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 31 Mar 2014 15:25:04 -0500 Subject: of/fdt: convert initial_boot_params to opaque pointer Now that all accesses to FDT header data has been converted to accessor helpers, initial_boot_params can become an opaque pointer. 
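For illustration only (this sketch is not part of the patch), the practical effect is that callers stop dereferencing struct boot_param_header fields and instead go through the libfdt accessors and the of_get_flat_dt_size() wrapper introduced just above; the helper names used here are the ones appearing in the diffs of this series:

	/* Illustrative sketch, not from the patch: early-boot code after the conversion. */
	static void __init example_fdt_report(void)
	{
		/* Previously this read be32_to_cpu(initial_boot_params->totalsize). */
		pr_info("FDT blob is %d bytes\n", of_get_flat_dt_size());

		/* Header validation no longer needs the struct layout at all. */
		if (fdt_check_header(initial_boot_params))
			pr_err("invalid FDT blob\n");
	}
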
Signed-off-by: Rob Herring Tested-by: Michal Simek Tested-by: Grant Likely Tested-by: Stephen Chivers --- drivers/of/fdt.c | 2 +- include/linux/of_fdt.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 358bcf0500d2..a6f83ea107ae 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -372,7 +372,7 @@ EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); int __initdata dt_root_addr_cells; int __initdata dt_root_size_cells; -struct boot_param_header *initial_boot_params; +void *initial_boot_params; #ifdef CONFIG_OF_EARLY_FLATTREE diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index e10099c95999..1f882e1da728 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -78,7 +78,7 @@ extern void of_fdt_unflatten_tree(unsigned long *blob, /* TBD: Temporary export of fdt globals - remove when code fully merged */ extern int __initdata dt_root_addr_cells; extern int __initdata dt_root_size_cells; -extern struct boot_param_header *initial_boot_params; +extern void *initial_boot_params; extern char __dtb_start[]; extern char __dtb_end[]; -- cgit v1.2.3 From c3fc952d2fbe3ec78defd70cf73d5d76d27092ec Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 1 Apr 2014 22:55:14 -0500 Subject: of: push struct boot_param_header and defines into powerpc Now powerpc is the only user of struct boot_param_header and FDT defines, so they can be moved into the powerpc architecture code. Signed-off-by: Rob Herring Acked-by: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: linuxppc-dev@lists.ozlabs.org Tested-by: Grant Likely Tested-by: Stephen Chivers --- arch/powerpc/include/asm/prom.h | 39 +++++++++++++++++++++++++++++++++++++++ include/linux/of_fdt.h | 37 ------------------------------------- 2 files changed, 39 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index d977b9b78696..74b79f07f041 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -26,6 +26,45 @@ #include #include +#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ +#define OF_DT_END_NODE 0x2 /* End node */ +#define OF_DT_PROP 0x3 /* Property: name off, size, + * content */ +#define OF_DT_NOP 0x4 /* nop */ +#define OF_DT_END 0x9 + +#define OF_DT_VERSION 0x10 + +/* + * This is what gets passed to the kernel by prom_init or kexec + * + * The dt struct contains the device tree structure, full pathes and + * property contents. The dt strings contain a separate block with just + * the strings for the property names, and is fully page aligned and + * self contained in a page, so that it can be kept around by the kernel, + * each property name appears only once in this page (cheap compression) + * + * the mem_rsvmap contains a map of reserved ranges of physical memory, + * passing it here instead of in the device-tree itself greatly simplifies + * the job of everybody. 
It's just a list of u64 pairs (base/size) that + * ends when size is 0 + */ +struct boot_param_header { + __be32 magic; /* magic word OF_DT_HEADER */ + __be32 totalsize; /* total size of DT block */ + __be32 off_dt_struct; /* offset to structure */ + __be32 off_dt_strings; /* offset to strings */ + __be32 off_mem_rsvmap; /* offset to memory reserve map */ + __be32 version; /* format version */ + __be32 last_comp_version; /* last compatible version */ + /* version 2 fields below */ + __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */ + /* version 3 fields below */ + __be32 dt_strings_size; /* size of the DT strings block */ + /* version 17 fields below */ + __be32 dt_struct_size; /* size of the DT structure block */ +}; + /* * OF address retreival & translation */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 1f882e1da728..5c0ab057eecf 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -17,45 +17,8 @@ /* Definitions used by the flattened device tree */ #define OF_DT_HEADER 0xd00dfeed /* marker */ -#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ -#define OF_DT_END_NODE 0x2 /* End node */ -#define OF_DT_PROP 0x3 /* Property: name off, size, - * content */ -#define OF_DT_NOP 0x4 /* nop */ -#define OF_DT_END 0x9 - -#define OF_DT_VERSION 0x10 #ifndef __ASSEMBLY__ -/* - * This is what gets passed to the kernel by prom_init or kexec - * - * The dt struct contains the device tree structure, full pathes and - * property contents. The dt strings contain a separate block with just - * the strings for the property names, and is fully page aligned and - * self contained in a page, so that it can be kept around by the kernel, - * each property name appears only once in this page (cheap compression) - * - * the mem_rsvmap contains a map of reserved ranges of physical memory, - * passing it here instead of in the device-tree itself greatly simplifies - * the job of everybody. It's just a list of u64 pairs (base/size) that - * ends when size is 0 - */ -struct boot_param_header { - __be32 magic; /* magic word OF_DT_HEADER */ - __be32 totalsize; /* total size of DT block */ - __be32 off_dt_struct; /* offset to structure */ - __be32 off_dt_strings; /* offset to strings */ - __be32 off_mem_rsvmap; /* offset to memory reserve map */ - __be32 version; /* format version */ - __be32 last_comp_version; /* last compatible version */ - /* version 2 fields below */ - __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */ - /* version 3 fields below */ - __be32 dt_strings_size; /* size of the DT strings block */ - /* version 17 fields below */ - __be32 dt_struct_size; /* size of the DT structure block */ -}; #if defined(CONFIG_OF_FLATTREE) -- cgit v1.2.3 From 37cfdaf782590e277d9352626dba4496734e0375 Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:06 +0300 Subject: usb: phy: msm: Move global regulators variables to driver state Eliminating global variables allows driver to handle multiple device instances. Signed-off-by: Ivan T. 
Ivanov Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 82 ++++++++++++++++++++----------------------- include/linux/usb/msm_hsusb.h | 3 ++ 2 files changed, 42 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 5b37b81f2ef6..878f67d29ed5 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -58,47 +58,43 @@ #define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */ #define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */ -static struct regulator *hsusb_3p3; -static struct regulator *hsusb_1p8; -static struct regulator *hsusb_vddcx; - static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init) { int ret = 0; if (init) { - hsusb_vddcx = regulator_get(motg->phy.dev, "HSUSB_VDDCX"); - if (IS_ERR(hsusb_vddcx)) { + motg->vddcx = regulator_get(motg->phy.dev, "HSUSB_VDDCX"); + if (IS_ERR(motg->vddcx)) { dev_err(motg->phy.dev, "unable to get hsusb vddcx\n"); - return PTR_ERR(hsusb_vddcx); + return PTR_ERR(motg->vddcx); } - ret = regulator_set_voltage(hsusb_vddcx, + ret = regulator_set_voltage(motg->vddcx, USB_PHY_VDD_DIG_VOL_MIN, USB_PHY_VDD_DIG_VOL_MAX); if (ret) { dev_err(motg->phy.dev, "unable to set the voltage " "for hsusb vddcx\n"); - regulator_put(hsusb_vddcx); + regulator_put(motg->vddcx); return ret; } - ret = regulator_enable(hsusb_vddcx); + ret = regulator_enable(motg->vddcx); if (ret) { dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n"); - regulator_put(hsusb_vddcx); + regulator_put(motg->vddcx); } } else { - ret = regulator_set_voltage(hsusb_vddcx, 0, + ret = regulator_set_voltage(motg->vddcx, 0, USB_PHY_VDD_DIG_VOL_MAX); if (ret) dev_err(motg->phy.dev, "unable to set the voltage " "for hsusb vddcx\n"); - ret = regulator_disable(hsusb_vddcx); + ret = regulator_disable(motg->vddcx); if (ret) dev_err(motg->phy.dev, "unable to disable hsusb vddcx\n"); - regulator_put(hsusb_vddcx); + regulator_put(motg->vddcx); } return ret; @@ -109,38 +105,38 @@ static int msm_hsusb_ldo_init(struct msm_otg *motg, int init) int rc = 0; if (init) { - hsusb_3p3 = regulator_get(motg->phy.dev, "HSUSB_3p3"); - if (IS_ERR(hsusb_3p3)) { + motg->v3p3 = regulator_get(motg->phy.dev, "HSUSB_3p3"); + if (IS_ERR(motg->v3p3)) { dev_err(motg->phy.dev, "unable to get hsusb 3p3\n"); - return PTR_ERR(hsusb_3p3); + return PTR_ERR(motg->v3p3); } - rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN, + rc = regulator_set_voltage(motg->v3p3, USB_PHY_3P3_VOL_MIN, USB_PHY_3P3_VOL_MAX); if (rc) { dev_err(motg->phy.dev, "unable to set voltage level " "for hsusb 3p3\n"); goto put_3p3; } - rc = regulator_enable(hsusb_3p3); + rc = regulator_enable(motg->v3p3); if (rc) { dev_err(motg->phy.dev, "unable to enable the hsusb 3p3\n"); goto put_3p3; } - hsusb_1p8 = regulator_get(motg->phy.dev, "HSUSB_1p8"); - if (IS_ERR(hsusb_1p8)) { + motg->v1p8 = regulator_get(motg->phy.dev, "HSUSB_1p8"); + if (IS_ERR(motg->v1p8)) { dev_err(motg->phy.dev, "unable to get hsusb 1p8\n"); - rc = PTR_ERR(hsusb_1p8); + rc = PTR_ERR(motg->v1p8); goto disable_3p3; } - rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN, + rc = regulator_set_voltage(motg->v1p8, USB_PHY_1P8_VOL_MIN, USB_PHY_1P8_VOL_MAX); if (rc) { dev_err(motg->phy.dev, "unable to set voltage level " "for hsusb 1p8\n"); goto put_1p8; } - rc = regulator_enable(hsusb_1p8); + rc = regulator_enable(motg->v1p8); if (rc) { dev_err(motg->phy.dev, "unable to enable the hsusb 1p8\n"); goto put_1p8; @@ -149,54 +145,54 @@ static int msm_hsusb_ldo_init(struct msm_otg *motg, 
int init) return 0; } - regulator_disable(hsusb_1p8); + regulator_disable(motg->v1p8); put_1p8: - regulator_put(hsusb_1p8); + regulator_put(motg->v1p8); disable_3p3: - regulator_disable(hsusb_3p3); + regulator_disable(motg->v3p3); put_3p3: - regulator_put(hsusb_3p3); + regulator_put(motg->v3p3); return rc; } -static int msm_hsusb_ldo_set_mode(int on) +static int msm_hsusb_ldo_set_mode(struct msm_otg *motg, int on) { int ret = 0; - if (!hsusb_1p8 || IS_ERR(hsusb_1p8)) { + if (!motg->v1p8 || IS_ERR(motg->v1p8)) { pr_err("%s: HSUSB_1p8 is not initialized\n", __func__); return -ENODEV; } - if (!hsusb_3p3 || IS_ERR(hsusb_3p3)) { + if (!motg->v3p3 || IS_ERR(motg->v3p3)) { pr_err("%s: HSUSB_3p3 is not initialized\n", __func__); return -ENODEV; } if (on) { - ret = regulator_set_optimum_mode(hsusb_1p8, + ret = regulator_set_optimum_mode(motg->v1p8, USB_PHY_1P8_HPM_LOAD); if (ret < 0) { pr_err("%s: Unable to set HPM of the regulator " "HSUSB_1p8\n", __func__); return ret; } - ret = regulator_set_optimum_mode(hsusb_3p3, + ret = regulator_set_optimum_mode(motg->v3p3, USB_PHY_3P3_HPM_LOAD); if (ret < 0) { pr_err("%s: Unable to set HPM of the regulator " "HSUSB_3p3\n", __func__); - regulator_set_optimum_mode(hsusb_1p8, + regulator_set_optimum_mode(motg->v1p8, USB_PHY_1P8_LPM_LOAD); return ret; } } else { - ret = regulator_set_optimum_mode(hsusb_1p8, + ret = regulator_set_optimum_mode(motg->v1p8, USB_PHY_1P8_LPM_LOAD); if (ret < 0) pr_err("%s: Unable to set LPM of the regulator " "HSUSB_1p8\n", __func__); - ret = regulator_set_optimum_mode(hsusb_3p3, + ret = regulator_set_optimum_mode(motg->v3p3, USB_PHY_3P3_LPM_LOAD); if (ret < 0) pr_err("%s: Unable to set LPM of the regulator " @@ -417,7 +413,7 @@ static int msm_otg_reset(struct usb_phy *phy) #ifdef CONFIG_PM #define USB_PHY_SUSP_DIG_VOL 500000 -static int msm_hsusb_config_vddcx(int high) +static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high) { int max_vol = USB_PHY_VDD_DIG_VOL_MAX; int min_vol; @@ -428,7 +424,7 @@ static int msm_hsusb_config_vddcx(int high) else min_vol = USB_PHY_SUSP_DIG_VOL; - ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); + ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol); if (ret) { pr_err("%s: unable to set the voltage for regulator " "HSUSB_VDDCX\n", __func__); @@ -518,8 +514,8 @@ static int msm_otg_suspend(struct msm_otg *motg) if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY && motg->pdata->otg_control == OTG_PMIC_CONTROL) { - msm_hsusb_ldo_set_mode(0); - msm_hsusb_config_vddcx(0); + msm_hsusb_ldo_set_mode(motg, 0); + msm_hsusb_config_vddcx(motg, 0); } if (device_may_wakeup(phy->dev)) @@ -555,8 +551,8 @@ static int msm_otg_resume(struct msm_otg *motg) if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY && motg->pdata->otg_control == OTG_PMIC_CONTROL) { - msm_hsusb_ldo_set_mode(1); - msm_hsusb_config_vddcx(1); + msm_hsusb_ldo_set_mode(motg, 1); + msm_hsusb_config_vddcx(motg, 1); writel(readl(USB_PHY_CTRL) & ~PHY_RETEN, USB_PHY_CTRL); } @@ -1521,7 +1517,7 @@ static int __init msm_otg_probe(struct platform_device *pdev) dev_err(&pdev->dev, "hsusb vreg configuration failed\n"); goto vddcx_exit; } - ret = msm_hsusb_ldo_set_mode(1); + ret = msm_hsusb_ldo_set_mode(motg, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg enable failed\n"); goto ldo_exit; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 32754835a39b..8705b0164684 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -183,6 +183,9 @@ struct msm_otg { enum usb_chg_state 
chg_state; enum usb_chg_type chg_type; u8 dcd_retries; + struct regulator *v3p3; + struct regulator *v1p8; + struct regulator *vddcx; }; #endif -- cgit v1.2.3 From 971232cf7c7a71ad3cbf433f592eee3ae1a578ac Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:11 +0300 Subject: usb: phy: msm: Replace custom enum usb_mode_type with enum usb_dr_mode Use enum usb_dr_mode and drop default usb_dr_mode from platform data. USB DT bindings states: dr_mode: "...In case this attribute isn't passed via DT, USB DRD controllers should default to OTG...", so remove redundand field. Signed-off-by: Ivan T. Ivanov Acked-by: David Brown Signed-off-by: Felipe Balbi --- arch/arm/mach-msm/board-msm7x30.c | 2 +- arch/arm/mach-msm/board-qsd8x50.c | 2 +- drivers/usb/phy/phy-msm-usb.c | 41 ++++++++++++++++----------------------- include/linux/usb/msm_hsusb.h | 20 +------------------ 4 files changed, 20 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 46de789ad3ae..0c4c200e1221 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c @@ -95,7 +95,7 @@ static int hsusb_phy_clk_reset(struct clk *phy_clk) static struct msm_otg_platform_data msm_otg_pdata = { .phy_init_seq = hsusb_phy_init_seq, - .mode = USB_PERIPHERAL, + .mode = USB_DR_MODE_PERIPHERAL, .otg_control = OTG_PHY_CONTROL, .link_clk_reset = hsusb_link_clk_reset, .phy_clk_reset = hsusb_phy_clk_reset, diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 9169ec324a43..4c748616ef47 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c @@ -116,7 +116,7 @@ static int hsusb_phy_clk_reset(struct clk *phy_clk) static struct msm_otg_platform_data msm_otg_pdata = { .phy_init_seq = hsusb_phy_init_seq, - .mode = USB_PERIPHERAL, + .mode = USB_DR_MODE_PERIPHERAL, .otg_control = OTG_PHY_CONTROL, .link_clk_reset = hsusb_link_clk_reset, .phy_clk_reset = hsusb_phy_clk_reset, diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 874c51a85683..7eb2abf3f874 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -348,10 +348,10 @@ static int msm_otg_reset(struct usb_phy *phy) if (pdata->otg_control == OTG_PHY_CONTROL) { val = readl(USB_OTGSC); - if (pdata->mode == USB_OTG) { + if (pdata->mode == USB_DR_MODE_OTG) { ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID; val |= OTGSC_IDIE | OTGSC_BSVIE; - } else if (pdata->mode == USB_PERIPHERAL) { + } else if (pdata->mode == USB_DR_MODE_PERIPHERAL) { ulpi_val = ULPI_INT_SESS_VALID; val |= OTGSC_BSVIE; } @@ -637,7 +637,7 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host) * Fail host registration if this board can support * only peripheral configuration. */ - if (motg->pdata->mode == USB_PERIPHERAL) { + if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) { dev_info(otg->phy->dev, "Host mode is not supported\n"); return -ENODEV; } @@ -666,7 +666,7 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host) * Kick the state machine work, if peripheral is not supported * or peripheral is already registered with us. 
*/ - if (motg->pdata->mode == USB_HOST || otg->gadget) { + if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) { pm_runtime_get_sync(otg->phy->dev); schedule_work(&motg->sm_work); } @@ -710,7 +710,7 @@ static int msm_otg_set_peripheral(struct usb_otg *otg, * Fail peripheral registration if this board can support * only host configuration. */ - if (motg->pdata->mode == USB_HOST) { + if (motg->pdata->mode == USB_DR_MODE_HOST) { dev_info(otg->phy->dev, "Peripheral mode is not supported\n"); return -ENODEV; } @@ -735,7 +735,7 @@ static int msm_otg_set_peripheral(struct usb_otg *otg, * Kick the state machine work, if host is not supported * or host is already registered with us. */ - if (motg->pdata->mode == USB_PERIPHERAL || otg->host) { + if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) { pm_runtime_get_sync(otg->phy->dev); schedule_work(&motg->sm_work); } @@ -1056,7 +1056,7 @@ static void msm_otg_init_sm(struct msm_otg *motg) u32 otgsc = readl(USB_OTGSC); switch (pdata->mode) { - case USB_OTG: + case USB_DR_MODE_OTG: if (pdata->otg_control == OTG_PHY_CONTROL) { if (otgsc & OTGSC_ID) set_bit(ID, &motg->inputs); @@ -1068,21 +1068,14 @@ static void msm_otg_init_sm(struct msm_otg *motg) else clear_bit(B_SESS_VLD, &motg->inputs); } else if (pdata->otg_control == OTG_USER_CONTROL) { - if (pdata->default_mode == USB_HOST) { - clear_bit(ID, &motg->inputs); - } else if (pdata->default_mode == USB_PERIPHERAL) { - set_bit(ID, &motg->inputs); - set_bit(B_SESS_VLD, &motg->inputs); - } else { set_bit(ID, &motg->inputs); clear_bit(B_SESS_VLD, &motg->inputs); - } } break; - case USB_HOST: + case USB_DR_MODE_HOST: clear_bit(ID, &motg->inputs); break; - case USB_PERIPHERAL: + case USB_DR_MODE_PERIPHERAL: set_bit(ID, &motg->inputs); if (otgsc & OTGSC_BSV) set_bit(B_SESS_VLD, &motg->inputs); @@ -1258,7 +1251,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, char buf[16]; struct usb_otg *otg = motg->phy.otg; int status = count; - enum usb_mode_type req_mode; + enum usb_dr_mode req_mode; memset(buf, 0x00, sizeof(buf)); @@ -1268,18 +1261,18 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, } if (!strncmp(buf, "host", 4)) { - req_mode = USB_HOST; + req_mode = USB_DR_MODE_HOST; } else if (!strncmp(buf, "peripheral", 10)) { - req_mode = USB_PERIPHERAL; + req_mode = USB_DR_MODE_PERIPHERAL; } else if (!strncmp(buf, "none", 4)) { - req_mode = USB_NONE; + req_mode = USB_DR_MODE_UNKNOWN; } else { status = -EINVAL; goto out; } switch (req_mode) { - case USB_NONE: + case USB_DR_MODE_UNKNOWN: switch (otg->phy->state) { case OTG_STATE_A_HOST: case OTG_STATE_B_PERIPHERAL: @@ -1290,7 +1283,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, goto out; } break; - case USB_PERIPHERAL: + case USB_DR_MODE_PERIPHERAL: switch (otg->phy->state) { case OTG_STATE_B_IDLE: case OTG_STATE_A_HOST: @@ -1301,7 +1294,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, goto out; } break; - case USB_HOST: + case USB_DR_MODE_HOST: switch (otg->phy->state) { case OTG_STATE_B_IDLE: case OTG_STATE_B_PERIPHERAL: @@ -1511,7 +1504,7 @@ static int msm_otg_probe(struct platform_device *pdev) platform_set_drvdata(pdev, motg); device_init_wakeup(&pdev->dev, 1); - if (motg->pdata->mode == USB_OTG && + if (motg->pdata->mode == USB_DR_MODE_OTG && motg->pdata->otg_control == OTG_USER_CONTROL) { ret = msm_otg_debugfs_init(motg); if (ret) diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 
8705b0164684..72c5830455bf 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -22,21 +22,6 @@ #include #include -/** - * Supported USB modes - * - * USB_PERIPHERAL Only peripheral mode is supported. - * USB_HOST Only host mode is supported. - * USB_OTG OTG mode is supported. - * - */ -enum usb_mode_type { - USB_NONE = 0, - USB_PERIPHERAL, - USB_HOST, - USB_OTG, -}; - /** * OTG control * @@ -121,8 +106,6 @@ enum usb_chg_type { * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). * @mode: Supported mode (OTG/peripheral/host). * @otg_control: OTG switch controlled by user/Id pin - * @default_mode: Default operational mode. Applicable only if - * OTG switch is controller by user. * @pclk_src_name: pclk is derived from ebi1_usb_clk in case of 7x27 and 8k * dfab_usb_hs_clk in case of 8660 and 8960. */ @@ -130,9 +113,8 @@ struct msm_otg_platform_data { int *phy_init_seq; void (*vbus_power)(bool on); unsigned power_budget; - enum usb_mode_type mode; + enum usb_dr_mode mode; enum otg_control_type otg_control; - enum usb_mode_type default_mode; enum msm_usb_phy_type phy_type; void (*setup_gpio)(enum usb_otg_state state); char *pclk_src_name; -- cgit v1.2.3 From ff0e4a68c931dc34e43c081d1b6a895a9aaf8a2b Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:12 +0300 Subject: usb: phy: msm: Remove unused pclk_src_name There are no references to 'pclk_src_name' in plaform code, so it is unused. Signed-off-by: Ivan T. Ivanov Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 26 +------------------------- include/linux/usb/msm_hsusb.h | 5 ----- 2 files changed, 1 insertion(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 7eb2abf3f874..c2361bfd2002 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -464,9 +464,6 @@ static int msm_otg_suspend(struct msm_otg *motg) if (!IS_ERR(motg->core_clk)) clk_disable_unprepare(motg->core_clk); - if (!IS_ERR(motg->pclk_src)) - clk_disable_unprepare(motg->pclk_src); - if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY && motg->pdata->otg_control == OTG_PMIC_CONTROL) { msm_hsusb_ldo_set_mode(motg, 0); @@ -496,9 +493,6 @@ static int msm_otg_resume(struct msm_otg *motg) if (!atomic_read(&motg->in_lpm)) return 0; - if (!IS_ERR(motg->pclk_src)) - clk_prepare_enable(motg->pclk_src); - clk_prepare_enable(motg->pclk); clk_prepare_enable(motg->clk); if (!IS_ERR(motg->core_clk)) @@ -1396,17 +1390,8 @@ static int msm_otg_probe(struct platform_device *pdev) * If USB Core is running its protocol engine based on CORE CLK, * CORE CLK must be running at >55Mhz for correct HSUSB * operation and USB core cannot tolerate frequency changes on - * CORE CLK. For such USB cores, vote for maximum clk frequency - * on pclk source + * CORE CLK. 
*/ - motg->pclk_src = ERR_PTR(-ENOENT); - if (motg->pdata->pclk_src_name) { - motg->pclk_src = devm_clk_get(&pdev->dev, - motg->pdata->pclk_src_name); - if (IS_ERR(motg->pclk_src)) - return PTR_ERR(motg->pclk_src); - } - motg->pclk = devm_clk_get(&pdev->dev, "usb_hs_pclk"); if (IS_ERR(motg->pclk)) { dev_err(&pdev->dev, "failed to get usb_hs_pclk\n"); @@ -1446,10 +1431,6 @@ static int msm_otg_probe(struct platform_device *pdev) motg->v1p8 = regs[2].consumer; clk_set_rate(motg->clk, 60000000); - if (!IS_ERR(motg->pclk_src)) { - clk_set_rate(motg->pclk_src, INT_MAX); - clk_prepare_enable(motg->pclk_src); - } clk_prepare_enable(motg->clk); clk_prepare_enable(motg->pclk); @@ -1525,8 +1506,6 @@ disable_clks: clk_disable_unprepare(motg->clk); if (!IS_ERR(motg->core_clk)) clk_disable_unprepare(motg->core_clk); - if (!IS_ERR(motg->pclk_src)) - clk_disable_unprepare(motg->pclk_src); return ret; } @@ -1571,9 +1550,6 @@ static int msm_otg_remove(struct platform_device *pdev) clk_disable_unprepare(motg->clk); if (!IS_ERR(motg->core_clk)) clk_disable_unprepare(motg->core_clk); - if (!IS_ERR(motg->pclk_src)) - clk_disable_unprepare(motg->pclk_src); - msm_hsusb_ldo_init(motg, 0); pm_runtime_set_suspended(&pdev->dev); diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 72c5830455bf..262ed80a0b9e 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -106,8 +106,6 @@ enum usb_chg_type { * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). * @mode: Supported mode (OTG/peripheral/host). * @otg_control: OTG switch controlled by user/Id pin - * @pclk_src_name: pclk is derived from ebi1_usb_clk in case of 7x27 and 8k - * dfab_usb_hs_clk in case of 8660 and 8960. */ struct msm_otg_platform_data { int *phy_init_seq; @@ -117,7 +115,6 @@ struct msm_otg_platform_data { enum otg_control_type otg_control; enum msm_usb_phy_type phy_type; void (*setup_gpio)(enum usb_otg_state state); - char *pclk_src_name; int (*link_clk_reset)(struct clk *link_clk, bool assert); int (*phy_clk_reset)(struct clk *phy_clk); }; @@ -129,7 +126,6 @@ struct msm_otg_platform_data { * @irq: IRQ number assigned for HSUSB controller. * @clk: clock struct of usb_hs_clk. * @pclk: clock struct of usb_hs_pclk. - * @pclk_src: pclk source for voting. * @phy_reset_clk: clock struct of usb_phy_clk. * @core_clk: clock struct of usb_hs_core_clk. * @regs: ioremapped register base address. @@ -150,7 +146,6 @@ struct msm_otg { int irq; struct clk *clk; struct clk *pclk; - struct clk *pclk_src; struct clk *phy_reset_clk; struct clk *core_clk; void __iomem *regs; -- cgit v1.2.3 From 8364f9af237f47fa128bd4e4f7b45beef890c994 Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:15 +0300 Subject: usb: phy: msm: Add device tree support and binding information Allows controller to be specified via device tree. Signed-off-by: Ivan T. 
Ivanov Signed-off-by: Felipe Balbi --- .../devicetree/bindings/usb/msm-hsusb.txt | 67 ++++++++++++ drivers/usb/phy/phy-msm-usb.c | 113 +++++++++++++++++---- include/linux/usb/msm_hsusb.h | 6 +- 3 files changed, 165 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt index 5ea26c631e3a..ee4123de3de4 100644 --- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt @@ -15,3 +15,70 @@ Example EHCI controller device node: usb-phy = <&usb_otg>; }; +USB PHY with optional OTG: + +Required properties: +- compatible: Should contain: + "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY + "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY + +- regs: Offset and length of the register set in the memory map +- interrupts: interrupt-specifier for the OTG interrupt. + +- clocks: A list of phandle + clock-specifier pairs for the + clocks listed in clock-names +- clock-names: Should contain the following: + "phy" USB PHY reference clock + "core" Protocol engine clock + "iface" Interface bus clock + "alt_core" Protocol engine clock for targets with asynchronous + reset methodology. (optional) + +- vdccx-supply: phandle to the regulator for the vdd supply for + digital circuit operation. +- v1p8-supply: phandle to the regulator for the 1.8V supply +- v3p3-supply: phandle to the regulator for the 3.3V supply + +- resets: A list of phandle + reset-specifier pairs for the + resets listed in reset-names +- reset-names: Should contain the following: + "phy" USB PHY controller reset + "link" USB LINK controller reset + +- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of + 1 - PHY control + 2 - PMIC control + +Optional properties: +- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg" + +- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device + Mode Eye Diagram test. Start address at which these values will be + written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as + "do not overwrite default value at this address". + For example: qcom,phy-init-sequence = < -1 0x63 >; + Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1. 
+ +Example HSUSB OTG controller device node: + + usb@f9a55000 { + compatible = "qcom,usb-otg-snps"; + reg = <0xf9a55000 0x400>; + interrupts = <0 134 0>; + dr_mode = "peripheral"; + + clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>, + <&gcc GCC_USB_HS_AHB_CLK>; + + clock-names = "phy", "core", "iface"; + + vddcx-supply = <&pm8841_s2_corner>; + v1p8-supply = <&pm8941_l6>; + v3p3-supply = <&pm8941_l24>; + + resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>; + reset-names = "phy", "link"; + + qcom,otg-control = <1>; + qcom,phy-init-sequence = < -1 0x63 >; + }; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 7e968aa143ce..1bf2d4ee29d2 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -30,9 +30,12 @@ #include #include #include +#include +#include #include #include +#include #include #include #include @@ -217,16 +220,16 @@ static struct usb_phy_io_ops msm_otg_io_ops = { static void ulpi_init(struct msm_otg *motg) { struct msm_otg_platform_data *pdata = motg->pdata; - int *seq = pdata->phy_init_seq; + int *seq = pdata->phy_init_seq, idx; + u32 addr = ULPI_EXT_VENDOR_SPECIFIC; - if (!seq) - return; + for (idx = 0; idx < pdata->phy_init_sz; idx++) { + if (seq[idx] == -1) + continue; - while (seq[0] >= 0) { dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n", - seq[0], seq[1]); - ulpi_write(&motg->phy, seq[0], seq[1]); - seq += 2; + seq[idx], addr + idx); + ulpi_write(&motg->phy, seq[idx], addr + idx); } } @@ -1343,26 +1346,96 @@ static void msm_otg_debugfs_cleanup(void) debugfs_remove(msm_otg_dbg_root); } +static struct of_device_id msm_otg_dt_match[] = { + { + .compatible = "qcom,usb-otg-ci", + .data = (void *) CI_45NM_INTEGRATED_PHY + }, + { + .compatible = "qcom,usb-otg-snps", + .data = (void *) SNPS_28NM_INTEGRATED_PHY + }, + { } +}; +MODULE_DEVICE_TABLE(of, msm_otg_dt_match); + +static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) +{ + struct msm_otg_platform_data *pdata; + const struct of_device_id *id; + struct device_node *node = pdev->dev.of_node; + struct property *prop; + int len, ret, words; + u32 val; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + motg->pdata = pdata; + + id = of_match_device(msm_otg_dt_match, &pdev->dev); + pdata->phy_type = (int) id->data; + + pdata->mode = of_usb_get_dr_mode(node); + if (pdata->mode == USB_DR_MODE_UNKNOWN) + pdata->mode = USB_DR_MODE_OTG; + + pdata->otg_control = OTG_PHY_CONTROL; + if (!of_property_read_u32(node, "qcom,otg-control", &val)) + if (val == OTG_PMIC_CONTROL) + pdata->otg_control = val; + + prop = of_find_property(node, "qcom,phy-init-sequence", &len); + if (!prop || !len) + return 0; + + words = len / sizeof(u32); + + if (words >= ULPI_EXT_VENDOR_SPECIFIC) { + dev_warn(&pdev->dev, "Too big PHY init sequence %d\n", words); + return 0; + } + + pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); + if (!pdata->phy_init_seq) { + dev_warn(&pdev->dev, "No space for PHY init sequence\n"); + return 0; + } + + ret = of_property_read_u32_array(node, "qcom,phy-init-sequence", + pdata->phy_init_seq, words); + if (!ret) + pdata->phy_init_sz = words; + + return 0; +} + static int msm_otg_probe(struct platform_device *pdev) { struct regulator_bulk_data regs[3]; int ret = 0; + struct device_node *np = pdev->dev.of_node; + struct msm_otg_platform_data *pdata; struct resource *res; struct msm_otg *motg; struct usb_phy *phy; - dev_info(&pdev->dev, "msm_otg probe\n"); - if 
(!dev_get_platdata(&pdev->dev)) { - dev_err(&pdev->dev, "No platform data given. Bailing out\n"); - return -ENODEV; - } - motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL); if (!motg) { dev_err(&pdev->dev, "unable to allocate msm_otg\n"); return -ENOMEM; } + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + if (!np) + return -ENXIO; + ret = msm_otg_read_dt(pdev, motg); + if (ret) + return ret; + } + motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), GFP_KERNEL); if (!motg->phy.otg) { @@ -1370,17 +1443,17 @@ static int msm_otg_probe(struct platform_device *pdev) return -ENOMEM; } - motg->pdata = dev_get_platdata(&pdev->dev); phy = &motg->phy; phy->dev = &pdev->dev; - motg->phy_reset_clk = devm_clk_get(&pdev->dev, "usb_phy_clk"); + motg->phy_reset_clk = devm_clk_get(&pdev->dev, + np ? "phy" : "usb_phy_clk"); if (IS_ERR(motg->phy_reset_clk)) { dev_err(&pdev->dev, "failed to get usb_phy_clk\n"); return PTR_ERR(motg->phy_reset_clk); } - motg->clk = devm_clk_get(&pdev->dev, "usb_hs_clk"); + motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk"); if (IS_ERR(motg->clk)) { dev_err(&pdev->dev, "failed to get usb_hs_clk\n"); return PTR_ERR(motg->clk); @@ -1392,7 +1465,7 @@ static int msm_otg_probe(struct platform_device *pdev) * operation and USB core cannot tolerate frequency changes on * CORE CLK. */ - motg->pclk = devm_clk_get(&pdev->dev, "usb_hs_pclk"); + motg->pclk = devm_clk_get(&pdev->dev, np ? "iface" : "usb_hs_pclk"); if (IS_ERR(motg->pclk)) { dev_err(&pdev->dev, "failed to get usb_hs_pclk\n"); return PTR_ERR(motg->pclk); @@ -1403,7 +1476,8 @@ static int msm_otg_probe(struct platform_device *pdev) * clock is introduced to remove the dependency on AXI * bus frequency. */ - motg->core_clk = devm_clk_get(&pdev->dev, "usb_hs_core_clk"); + motg->core_clk = devm_clk_get(&pdev->dev, + np ? "alt_core" : "usb_hs_core_clk"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); motg->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); @@ -1486,7 +1560,7 @@ static int msm_otg_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); if (motg->pdata->mode == USB_DR_MODE_OTG && - motg->pdata->otg_control == OTG_USER_CONTROL) { + motg->pdata->otg_control == OTG_USER_CONTROL) { ret = msm_otg_debugfs_init(motg); if (ret) dev_dbg(&pdev->dev, "Can not create mode change file\n"); @@ -1639,6 +1713,7 @@ static struct platform_driver msm_otg_driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &msm_otg_dev_pm_ops, + .of_match_table = msm_otg_dt_match, }, }; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 262ed80a0b9e..bd68299c278e 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -100,8 +100,9 @@ enum usb_chg_type { /** * struct msm_otg_platform_data - platform device data * for msm_otg driver. - * @phy_init_seq: PHY configuration sequence. val, reg pairs - * terminated by -1. + * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as + * "do not overwrite default vaule at this address". + * @phy_init_sz: PHY configuration sequence size. * @vbus_power: VBUS power on/off routine. * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). * @mode: Supported mode (OTG/peripheral/host). 
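
For boards that still pass platform data instead of device tree, the new fields are filled in directly. A minimal sketch, with the array name and register value invented for illustration (only the two PHY fields change meaning here):

	static int example_phy_init_seq[] = {
		-1,	/* leave ULPI_EXT_VENDOR_SPECIFIC + 0 at its default */
		0x63,	/* written to ULPI_EXT_VENDOR_SPECIFIC + 1 */
	};

	static struct msm_otg_platform_data example_otg_pdata = {
		.phy_init_seq	= example_phy_init_seq,
		.phy_init_sz	= ARRAY_SIZE(example_phy_init_seq),
		.mode		= USB_DR_MODE_OTG,
		.otg_control	= OTG_PHY_CONTROL,
	};

The old format was (value, register) pairs terminated by -1; the new one is a plain array indexed from ULPI_EXT_VENDOR_SPECIFIC, with -1 meaning "do not touch this register".
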
@@ -109,6 +110,7 @@ enum usb_chg_type { */ struct msm_otg_platform_data { int *phy_init_seq; + int phy_init_sz; void (*vbus_power)(bool on); unsigned power_budget; enum usb_dr_mode mode; -- cgit v1.2.3 From a27345434134080273e0597e1d9721ff9e6ca67f Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:16 +0300 Subject: usb: phy: msm: Use reset framework for LINK and PHY resets Using reset framework eliminate need of platform specific callbacks and enable reset lines to be specified in DT files. Signed-off-by: Ivan T. Ivanov Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 29 +++++++++++++++++++++-------- include/linux/usb/msm_hsusb.h | 3 +++ 2 files changed, 24 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 1bf2d4ee29d2..a6abb1b3a7f0 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -235,12 +236,15 @@ static void ulpi_init(struct msm_otg *motg) static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert) { - int ret = 0; + int ret; - if (!motg->pdata->link_clk_reset) - return ret; + if (motg->pdata->link_clk_reset) + ret = motg->pdata->link_clk_reset(motg->clk, assert); + else if (assert) + ret = reset_control_assert(motg->link_rst); + else + ret = reset_control_deassert(motg->link_rst); - ret = motg->pdata->link_clk_reset(motg->clk, assert); if (ret) dev_err(motg->phy.dev, "usb link clk reset %s failed\n", assert ? "assert" : "deassert"); @@ -250,12 +254,13 @@ static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert) static int msm_otg_phy_clk_reset(struct msm_otg *motg) { - int ret = 0; + int ret; - if (!motg->pdata->phy_clk_reset) - return ret; + if (motg->pdata->phy_clk_reset) + ret = motg->pdata->phy_clk_reset(motg->phy_reset_clk); + else + ret = reset_control_reset(motg->phy_rst); - ret = motg->pdata->phy_clk_reset(motg->phy_reset_clk); if (ret) dev_err(motg->phy.dev, "usb phy clk reset failed\n"); @@ -1377,6 +1382,14 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) id = of_match_device(msm_otg_dt_match, &pdev->dev); pdata->phy_type = (int) id->data; + motg->link_rst = devm_reset_control_get(&pdev->dev, "link"); + if (IS_ERR(motg->link_rst)) + return PTR_ERR(motg->link_rst); + + motg->phy_rst = devm_reset_control_get(&pdev->dev, "phy"); + if (IS_ERR(motg->phy_rst)) + return PTR_ERR(motg->phy_rst); + pdata->mode = of_usb_get_dr_mode(node); if (pdata->mode == USB_DR_MODE_UNKNOWN) pdata->mode = USB_DR_MODE_OTG; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index bd68299c278e..4e5d9168f52e 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -165,6 +165,9 @@ struct msm_otg { struct regulator *v3p3; struct regulator *v1p8; struct regulator *vddcx; + + struct reset_control *phy_rst; + struct reset_control *link_rst; }; #endif -- cgit v1.2.3 From cfa3ff5dfe6a11ac8bc4a080416984ab00b0980c Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:17 +0300 Subject: usb: phy: msm: Add support for secondary PHY control Allow support to use 2nd HSPHY with USB2 Core. Some platforms may have configuration to allow USB controller work with any of the two HSPHYs present. By default driver configures USB core to use primary HSPHY. Add support to allow user select 2nd HSPHY using DT parameter. Signed-off-by: Ivan T. 
Ivanov Cc: Manu Gautam Signed-off-by: Felipe Balbi --- .../devicetree/bindings/usb/msm-hsusb.txt | 6 ++++++ drivers/usb/phy/phy-msm-usb.c | 24 ++++++++++++++++++++-- include/linux/usb/msm_hsusb.h | 1 + include/linux/usb/msm_hsusb_hw.h | 1 + 4 files changed, 30 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt index ee4123de3de4..066966706ca1 100644 --- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt @@ -59,6 +59,12 @@ Optional properties: For example: qcom,phy-init-sequence = < -1 0x63 >; Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1. +- qcom,phy-num: Select number of pyco-phy to use, can be one of + 0 - PHY one, default + 1 - Second PHY + Some platforms may have configuration to allow USB + controller work with any of the two HSPHYs present. + Example HSUSB OTG controller device node: usb@f9a55000 { diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index a6abb1b3a7f0..8d57045ac938 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -314,6 +314,9 @@ static int msm_otg_phy_reset(struct msm_otg *motg) if (!retries) return -ETIMEDOUT; + if (motg->phy_number) + writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2); + dev_info(motg->phy.dev, "phy_reset: success\n"); return 0; } @@ -368,6 +371,9 @@ static int msm_otg_reset(struct usb_phy *phy) ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL); } + if (motg->phy_number) + writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2); + return 0; } @@ -404,6 +410,7 @@ static int msm_otg_suspend(struct msm_otg *motg) struct usb_phy *phy = &motg->phy; struct usb_bus *bus = phy->otg->host; struct msm_otg_platform_data *pdata = motg->pdata; + void __iomem *addr; int cnt = 0; if (atomic_read(&motg->in_lpm)) @@ -463,9 +470,13 @@ static int msm_otg_suspend(struct msm_otg *motg) */ writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD); + addr = USB_PHY_CTRL; + if (motg->phy_number) + addr = USB_PHY_CTRL2; + if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY && motg->pdata->otg_control == OTG_PMIC_CONTROL) - writel(readl(USB_PHY_CTRL) | PHY_RETEN, USB_PHY_CTRL); + writel(readl(addr) | PHY_RETEN, addr); clk_disable_unprepare(motg->pclk); clk_disable_unprepare(motg->clk); @@ -495,6 +506,7 @@ static int msm_otg_resume(struct msm_otg *motg) { struct usb_phy *phy = &motg->phy; struct usb_bus *bus = phy->otg->host; + void __iomem *addr; int cnt = 0; unsigned temp; @@ -508,9 +520,14 @@ static int msm_otg_resume(struct msm_otg *motg) if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY && motg->pdata->otg_control == OTG_PMIC_CONTROL) { + + addr = USB_PHY_CTRL; + if (motg->phy_number) + addr = USB_PHY_CTRL2; + msm_hsusb_ldo_set_mode(motg, 1); msm_hsusb_config_vddcx(motg, 1); - writel(readl(USB_PHY_CTRL) & ~PHY_RETEN, USB_PHY_CTRL); + writel(readl(addr) & ~PHY_RETEN, addr); } temp = readl(USB_USBCMD); @@ -1399,6 +1416,9 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) if (val == OTG_PMIC_CONTROL) pdata->otg_control = val; + if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2) + motg->phy_number = val; + prop = of_find_property(node, "qcom,phy-init-sequence", &len); if (!prop || !len) return 0; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 4e5d9168f52e..4628f1a4713e 100644 --- a/include/linux/usb/msm_hsusb.h +++ 
b/include/linux/usb/msm_hsusb.h @@ -158,6 +158,7 @@ struct msm_otg { atomic_t in_lpm; int async_int; unsigned cur_power; + int phy_number; struct delayed_work chg_work; enum usb_chg_state chg_state; enum usb_chg_type chg_type; diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index 6e97a2d3d39f..e6d703567155 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -25,6 +25,7 @@ #define USB_OTGSC (MSM_USB_BASE + 0x01A4) #define USB_USBMODE (MSM_USB_BASE + 0x01A8) #define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) +#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) #define USBCMD_RESET 2 #define USB_USBINTR (MSM_USB_BASE + 0x0148) -- cgit v1.2.3 From d69c6f5df376ea40df5886468b155f515fddfbb2 Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:18 +0300 Subject: usb: phy: msm: Correct USB PHY Reset sequence for newer platform On few legacy platforms, USB PHY is having dedicated reset clk. It is used to reset USB PHY after putting USB PHY into low power mode and for calibration of USB PHY. Putting USB PHY into low power mode is causing ulpi read/write timeout as expected. USB PHY reset clk is not available on newer platform. For 28nm PHY, reset USB PHY after resetting USB LINK. Also reset USB PHY using USB_PHY_PON bit with USB_OTG_HS_PHY_CTRL register after programming USB PHY Override registers as suggested with hardware programming guidelines. Signed-off-by: Ivan T. Ivanov Signed-off-by: Tim Bird Cc: Mayank Rana Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 140 ++++++++++++++++++++++++--------------- include/linux/usb/msm_hsusb_hw.h | 5 ++ 2 files changed, 93 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 8d57045ac938..bb339963f8bb 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -48,6 +48,7 @@ #define DRIVER_NAME "msm_otg" #define ULPI_IO_TIMEOUT_USEC (10 * 1000) +#define LINK_RESET_TIMEOUT_USEC (250 * 1000) #define USB_PHY_3P3_VOL_MIN 3050000 /* uV */ #define USB_PHY_3P3_VOL_MAX 3300000 /* uV */ @@ -267,77 +268,35 @@ static int msm_otg_phy_clk_reset(struct msm_otg *motg) return ret; } -static int msm_otg_phy_reset(struct msm_otg *motg) +static int msm_link_reset(struct msm_otg *motg) { u32 val; int ret; - int retries; ret = msm_otg_link_clk_reset(motg, 1); - if (ret) - return ret; - ret = msm_otg_phy_clk_reset(motg); - if (ret) - return ret; - ret = msm_otg_link_clk_reset(motg, 0); if (ret) return ret; - val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK; - writel(val | PORTSC_PTS_ULPI, USB_PORTSC); - - for (retries = 3; retries > 0; retries--) { - ret = ulpi_write(&motg->phy, ULPI_FUNC_CTRL_SUSPENDM, - ULPI_CLR(ULPI_FUNC_CTRL)); - if (!ret) - break; - ret = msm_otg_phy_clk_reset(motg); - if (ret) - return ret; - } - if (!retries) - return -ETIMEDOUT; + /* wait for 1ms delay as suggested in HPG. 
*/ + usleep_range(1000, 1200); - /* This reset calibrates the phy, if the above write succeeded */ - ret = msm_otg_phy_clk_reset(motg); + ret = msm_otg_link_clk_reset(motg, 0); if (ret) return ret; - for (retries = 3; retries > 0; retries--) { - ret = ulpi_read(&motg->phy, ULPI_DEBUG); - if (ret != -ETIMEDOUT) - break; - ret = msm_otg_phy_clk_reset(motg); - if (ret) - return ret; - } - if (!retries) - return -ETIMEDOUT; - if (motg->phy_number) writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2); - dev_info(motg->phy.dev, "phy_reset: success\n"); + val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK; + writel(val | PORTSC_PTS_ULPI, USB_PORTSC); + return 0; } -#define LINK_RESET_TIMEOUT_USEC (250 * 1000) static int msm_otg_reset(struct usb_phy *phy) { struct msm_otg *motg = container_of(phy, struct msm_otg, phy); - struct msm_otg_platform_data *pdata = motg->pdata; int cnt = 0; - int ret; - u32 val = 0; - u32 ulpi_val = 0; - - ret = msm_otg_phy_reset(motg); - if (ret) { - dev_err(phy->dev, "phy_reset failed\n"); - return ret; - } - - ulpi_init(motg); writel(USBCMD_RESET, USB_USBCMD); while (cnt < LINK_RESET_TIMEOUT_USEC) { @@ -351,11 +310,86 @@ static int msm_otg_reset(struct usb_phy *phy) /* select ULPI phy */ writel(0x80000000, USB_PORTSC); + writel(0x0, USB_AHBBURST); + writel(0x08, USB_AHBMODE); + + if (motg->phy_number) + writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2); + return 0; +} + +static void msm_phy_reset(struct msm_otg *motg) +{ + void __iomem *addr; + + if (motg->pdata->phy_type != SNPS_28NM_INTEGRATED_PHY) { + msm_otg_phy_clk_reset(motg); + return; + } + + addr = USB_PHY_CTRL; + if (motg->phy_number) + addr = USB_PHY_CTRL2; + + /* Assert USB PHY_POR */ + writel(readl(addr) | PHY_POR_ASSERT, addr); + + /* + * wait for minimum 10 microseconds as suggested in HPG. + * Use a slightly larger value since the exact value didn't + * work 100% of the time. + */ + udelay(12); + + /* Deassert USB PHY_POR */ + writel(readl(addr) & ~PHY_POR_ASSERT, addr); +} + +static int msm_usb_reset(struct usb_phy *phy) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + int ret; + + if (!IS_ERR(motg->core_clk)) + clk_prepare_enable(motg->core_clk); + + ret = msm_link_reset(motg); + if (ret) { + dev_err(phy->dev, "phy_reset failed\n"); + return ret; + } + + ret = msm_otg_reset(&motg->phy); + if (ret) { + dev_err(phy->dev, "link reset failed\n"); + return ret; + } msleep(100); - writel(0x0, USB_AHBBURST); - writel(0x00, USB_AHBMODE); + /* Reset USB PHY after performing USB Link RESET */ + msm_phy_reset(motg); + + if (!IS_ERR(motg->core_clk)) + clk_disable_unprepare(motg->core_clk); + + return 0; +} + +static int msm_phy_init(struct usb_phy *phy) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + struct msm_otg_platform_data *pdata = motg->pdata; + u32 val, ulpi_val = 0; + + /* Program USB PHY Override registers. */ + ulpi_init(motg); + + /* + * It is recommended in HPG to reset USB PHY after programming + * USB PHY Override registers. 
+ */ + msm_phy_reset(motg); if (pdata->otg_control == OTG_PHY_CONTROL) { val = readl(USB_OTGSC); @@ -1574,7 +1608,7 @@ static int msm_otg_probe(struct platform_device *pdev) goto disable_ldo; } - phy->init = msm_otg_reset; + phy->init = msm_phy_init; phy->set_power = msm_otg_set_power; phy->io_ops = &msm_otg_io_ops; @@ -1583,6 +1617,8 @@ static int msm_otg_probe(struct platform_device *pdev) phy->otg->set_host = msm_otg_set_host; phy->otg->set_peripheral = msm_otg_set_peripheral; + msm_usb_reset(phy); + ret = usb_add_phy(&motg->phy, USB_PHY_TYPE_USB2); if (ret) { dev_err(&pdev->dev, "usb_add_phy failed\n"); diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index e6d703567155..575c74397e52 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -42,9 +42,14 @@ #define ULPI_DATA(n) ((n) & 255) #define ULPI_DATA_READ(n) (((n) >> 8) & 255) +/* synopsys 28nm phy registers */ +#define ULPI_PWR_CLK_MNG_REG 0x88 +#define OTG_COMP_DISABLE BIT(0) + #define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ #define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ #define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ +#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */ /* OTG definitions */ #define OTGSC_INTSTS_MASK (0x7f << 16) -- cgit v1.2.3 From 9f27984b9e098ce0a35b210ec0315c76108494e4 Mon Sep 17 00:00:00 2001 From: Tim Bird Date: Mon, 28 Apr 2014 16:34:19 +0300 Subject: usb: phy: msm: Fix PTS definitions for MSM USB controller Fix the value used for Parallel Transceiver Select (PTS) for the MSM USB controller. This is a standard chipidea PORTSC definition, where a PHY_TYPE of 10b (<<30) is ULPI and 11b (<<30) is SERIAL. Fix the definitions and use them correctly in the driver code. Signed-off-by: Tim Bird Signed-off-by: Ivan T. 
Ivanov Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 8 +++++--- include/linux/usb/msm_hsusb_hw.h | 5 +++-- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index bb339963f8bb..db8d96377620 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -287,8 +287,9 @@ static int msm_link_reset(struct msm_otg *motg) if (motg->phy_number) writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2); + /* put transceiver in serial mode as part of reset */ val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK; - writel(val | PORTSC_PTS_ULPI, USB_PORTSC); + writel(val | PORTSC_PTS_SERIAL, USB_PORTSC); return 0; } @@ -308,8 +309,9 @@ static int msm_otg_reset(struct usb_phy *phy) if (cnt >= LINK_RESET_TIMEOUT_USEC) return -ETIMEDOUT; - /* select ULPI phy */ - writel(0x80000000, USB_PORTSC); + /* select ULPI phy and clear other status/control bits in PORTSC */ + writel(PORTSC_PTS_ULPI, USB_PORTSC); + writel(0x0, USB_AHBBURST); writel(0x08, USB_AHBMODE); diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index 575c74397e52..98d3dd8976e5 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -31,8 +31,9 @@ #define USB_USBINTR (MSM_USB_BASE + 0x0148) #define PORTSC_PHCD (1 << 23) /* phy suspend mode */ -#define PORTSC_PTS_MASK (3 << 30) -#define PORTSC_PTS_ULPI (3 << 30) +#define PORTSC_PTS_MASK (3 << 30) +#define PORTSC_PTS_ULPI (2 << 30) +#define PORTSC_PTS_SERIAL (3 << 30) #define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170) #define ULPI_RUN (1 << 30) -- cgit v1.2.3 From 30bf8667cef5655ddfaedf043f13d03606844213 Mon Sep 17 00:00:00 2001 From: Tim Bird Date: Mon, 28 Apr 2014 16:34:20 +0300 Subject: usb: phy: msm: Select secondary PHY via TCSR Select the secondary PHY using the TCSR register, if phy-num=1 in the DTS (or phy_number is set in the platform data). The SOC has 2 PHYs which can be used with the OTG port, and this code allows configuring the correct one. Note: This resolves the problem I was seeing where I couldn't get the USB driver working at all on a dragonboard, from cold boot. This patch depends on patch 5/14 from Ivan's msm USB patch set. It does not use DT for the register address, as there's no evidence that this address changes between SoC versions. Signed-off-by: Tim Bird Signed-off-by: Ivan T. Ivanov Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-msm-usb.c | 14 ++++++++++++++ include/linux/usb/msm_hsusb_hw.h | 3 +++ 2 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index db8d96377620..9437bcf8c367 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -1489,6 +1489,7 @@ static int msm_otg_probe(struct platform_device *pdev) struct resource *res; struct msm_otg *motg; struct usb_phy *phy; + void __iomem *phy_select; motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL); if (!motg) { @@ -1553,6 +1554,19 @@ static int msm_otg_probe(struct platform_device *pdev) if (IS_ERR(motg->regs)) return PTR_ERR(motg->regs); + /* + * NOTE: The PHYs can be multiplexed between the chipidea controller + * and the dwc3 controller, using a single bit. It is important that + * the dwc3 driver does not set this bit in an incompatible way. 
+ */ + if (motg->phy_number) { + phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); + if (IS_ERR(phy_select)) + return PTR_ERR(phy_select); + /* Enable second PHY with the OTG port */ + writel_relaxed(0x1, phy_select); + } + dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs); motg->irq = platform_get_irq(pdev, 0); diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index 98d3dd8976e5..a29f6030afb1 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -16,6 +16,9 @@ #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ #define __LINUX_USB_GADGET_MSM72K_UDC_H__ +/* USB phy selector - in TCSR address range */ +#define USB2_PHY_SEL 0xfd4ab000 + #define USB_AHBBURST (MSM_USB_BASE + 0x0090) #define USB_AHBMODE (MSM_USB_BASE + 0x0098) #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ -- cgit v1.2.3 From 01799b622217ffebdc95e8e0aedbd4cff6a35a50 Mon Sep 17 00:00:00 2001 From: "Ivan T. Ivanov" Date: Mon, 28 Apr 2014 16:34:22 +0300 Subject: usb: phy: msm: Vote for corner of VDD CX instead of voltage of VDD CX New platform uses RBCPR hardware feature, with that voting for absolute voltage of VDD CX is not required. Hence vote for corner of VDD CX which uses nominal corner voltage on VDD CX. Signed-off-by: Ivan T. Ivanov Cc: Mayank Rana Signed-off-by: Felipe Balbi --- .../devicetree/bindings/usb/msm-hsusb.txt | 5 ++++ drivers/usb/phy/phy-msm-usb.c | 35 +++++++++++++++++----- include/linux/usb/msm_hsusb.h | 1 + 3 files changed, 33 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt index 066966706ca1..2826f2af503a 100644 --- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt @@ -65,6 +65,10 @@ Optional properties: Some platforms may have configuration to allow USB controller work with any of the two HSPHYs present. +- qcom,vdd-levels: This property must be a list of three integer values + (no, min, max) where each value represents either a voltage + in microvolts or a value corresponding to voltage corner. 
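
The same three cells feed motg->vdd_levels[] whether they are microvolts (legacy targets) or corner indices (RBCPR targets, as in the <1 5 7> of the example node below); the driver simply hands them to the regulator framework. A condensed sketch of how the levels are applied, mirroring the driver change in this patch rather than adding to it:

	static int example_vddcx_vote(struct msm_otg *motg, bool lpm)
	{
		/* in low power mode the floor drops to the "no" level */
		int floor = lpm ? motg->vdd_levels[VDD_LEVEL_NONE]
				: motg->vdd_levels[VDD_LEVEL_MIN];

		return regulator_set_voltage(motg->vddcx, floor,
					     motg->vdd_levels[VDD_LEVEL_MAX]);
	}
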
+ Example HSUSB OTG controller device node: usb@f9a55000 { @@ -87,4 +91,5 @@ Example HSUSB OTG controller device node: qcom,otg-control = <1>; qcom,phy-init-sequence = < -1 0x63 >; + qcom,vdd-levels = <1 5 7>; }; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 366527ecbdd1..8e7956eb8a77 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -62,6 +62,13 @@ #define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */ #define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */ +#define USB_PHY_SUSP_DIG_VOL 500000 /* uV */ + +enum vdd_levels { + VDD_LEVEL_NONE = 0, + VDD_LEVEL_MIN, + VDD_LEVEL_MAX, +}; static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init) { @@ -69,8 +76,8 @@ static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init) if (init) { ret = regulator_set_voltage(motg->vddcx, - USB_PHY_VDD_DIG_VOL_MIN, - USB_PHY_VDD_DIG_VOL_MAX); + motg->vdd_levels[VDD_LEVEL_MIN], + motg->vdd_levels[VDD_LEVEL_MAX]); if (ret) { dev_err(motg->phy.dev, "Cannot set vddcx voltage\n"); return ret; @@ -81,7 +88,7 @@ static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init) dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n"); } else { ret = regulator_set_voltage(motg->vddcx, 0, - USB_PHY_VDD_DIG_VOL_MAX); + motg->vdd_levels[VDD_LEVEL_MAX]); if (ret) dev_err(motg->phy.dev, "Cannot set vddcx voltage\n"); ret = regulator_disable(motg->vddcx); @@ -435,17 +442,16 @@ static int msm_phy_init(struct usb_phy *phy) #ifdef CONFIG_PM -#define USB_PHY_SUSP_DIG_VOL 500000 static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high) { - int max_vol = USB_PHY_VDD_DIG_VOL_MAX; + int max_vol = motg->vdd_levels[VDD_LEVEL_MAX]; int min_vol; int ret; if (high) - min_vol = USB_PHY_VDD_DIG_VOL_MIN; + min_vol = motg->vdd_levels[VDD_LEVEL_MIN]; else - min_vol = USB_PHY_SUSP_DIG_VOL; + min_vol = motg->vdd_levels[VDD_LEVEL_NONE]; ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol); if (ret) { @@ -1441,7 +1447,7 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) struct device_node *node = pdev->dev.of_node; struct property *prop; int len, ret, words; - u32 val; + u32 val, tmp[3]; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -1472,6 +1478,19 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2) motg->phy_number = val; + motg->vdd_levels[VDD_LEVEL_NONE] = USB_PHY_SUSP_DIG_VOL; + motg->vdd_levels[VDD_LEVEL_MIN] = USB_PHY_VDD_DIG_VOL_MIN; + motg->vdd_levels[VDD_LEVEL_MAX] = USB_PHY_VDD_DIG_VOL_MAX; + + if (of_get_property(node, "qcom,vdd-levels", &len) && + len == sizeof(tmp)) { + of_property_read_u32_array(node, "qcom,vdd-levels", + tmp, len / sizeof(*tmp)); + motg->vdd_levels[VDD_LEVEL_NONE] = tmp[VDD_LEVEL_NONE]; + motg->vdd_levels[VDD_LEVEL_MIN] = tmp[VDD_LEVEL_MIN]; + motg->vdd_levels[VDD_LEVEL_MAX] = tmp[VDD_LEVEL_MAX]; + } + prop = of_find_property(node, "qcom,phy-init-sequence", &len); if (!prop || !len) return 0; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 4628f1a4713e..b0a39243295a 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -169,6 +169,7 @@ struct msm_otg { struct reset_control *phy_rst; struct reset_control *link_rst; + int vdd_levels[3]; }; #endif -- cgit v1.2.3 From 0302f71c0aa59571ac306f93068fbbfe65ea349b Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Mon, 30 Dec 2013 12:12:12 -0500 Subject: efi: add helper function to 
get UEFI params from FDT ARM and ARM64 architectures use the device tree to pass UEFI parameters from stub to kernel. These parameters are things known to the stub but not discoverable by the kernel after the stub calls ExitBootSerives(). There is a helper function in: drivers/firmware/efi/fdt.c which the stub uses to add the UEFI parameters to the device tree. This patch adds a complimentary helper function which UEFI runtime support may use to retrieve the parameters from the device tree. If an architecture wants to use this helper, it should select CONFIG_EFI_PARAMS_FROM_FDT. Signed-off-by: Mark Salter Signed-off-by: Leif Lindholm Acked-by: Catalin Marinas Signed-off-by: Matt Fleming --- drivers/firmware/efi/Kconfig | 7 ++++ drivers/firmware/efi/efi.c | 79 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/efi.h | 9 +++++ 3 files changed, 95 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 1e75f48b61f8..d420ae2d3413 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -47,6 +47,13 @@ config EFI_RUNTIME_MAP See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. +config EFI_PARAMS_FROM_FDT + bool + help + Select this config option from the architecture Kconfig if + the EFI runtime support gets system table address, memory + map address, and other parameters from the device tree. + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index af20f1712337..cd36deb619fa 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include struct efi __read_mostly efi = { @@ -318,3 +320,80 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables) return 0; } + +#ifdef CONFIG_EFI_PARAMS_FROM_FDT + +#define UEFI_PARAM(name, prop, field) \ + { \ + { name }, \ + { prop }, \ + offsetof(struct efi_fdt_params, field), \ + FIELD_SIZEOF(struct efi_fdt_params, field) \ + } + +static __initdata struct { + const char name[32]; + const char propname[32]; + int offset; + int size; +} dt_params[] = { + UEFI_PARAM("System Table", "linux,uefi-system-table", system_table), + UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap), + UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size), + UEFI_PARAM("MemMap Desc. Size", "linux,uefi-mmap-desc-size", desc_size), + UEFI_PARAM("MemMap Desc. 
Version", "linux,uefi-mmap-desc-ver", desc_ver) +}; + +struct param_info { + int verbose; + void *params; +}; + +static int __init fdt_find_uefi_params(unsigned long node, const char *uname, + int depth, void *data) +{ + struct param_info *info = data; + void *prop, *dest; + unsigned long len; + u64 val; + int i; + + if (depth != 1 || + (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) + return 0; + + pr_info("Getting parameters from FDT:\n"); + + for (i = 0; i < ARRAY_SIZE(dt_params); i++) { + prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); + if (!prop) { + pr_err("Can't find %s in device tree!\n", + dt_params[i].name); + return 0; + } + dest = info->params + dt_params[i].offset; + + val = of_read_number(prop, len / sizeof(u32)); + + if (dt_params[i].size == sizeof(u32)) + *(u32 *)dest = val; + else + *(u64 *)dest = val; + + if (info->verbose) + pr_info(" %s: 0x%0*llx\n", dt_params[i].name, + dt_params[i].size * 2, val); + } + return 1; +} + +int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) +{ + struct param_info info; + + info.verbose = verbose; + info.params = params; + + return of_scan_flat_dt(fdt_find_uefi_params, &info); +} +#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 6a4d8e27d1d7..cd0172e796cb 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -664,6 +664,14 @@ struct efi_memory_map { unsigned long desc_size; }; +struct efi_fdt_params { + u64 system_table; + u64 mmap; + u32 mmap_size; + u32 desc_size; + u32 desc_ver; +}; + typedef struct { u32 revision; u32 parent_handle; @@ -861,6 +869,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource, extern void efi_get_time(struct timespec *now); extern int efi_set_rtc_mmss(const struct timespec *now); extern void efi_reserve_boot_services(void); +extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); extern struct efi_memory_map memmap; /* Iterate through an efi_memory_map */ -- cgit v1.2.3 From 263b4a30bfdb0d756ae9c70c6ff2eef1eb951770 Mon Sep 17 00:00:00 2001 From: Roy Franz Date: Wed, 8 Jan 2014 17:54:19 -0800 Subject: efi: Add shared FDT related functions for ARM/ARM64 Both ARM and ARM64 stubs will update the device tree that they pass to the kernel. In both cases they primarily need to add the same UEFI related information, so the function can be shared. Create a new FDT related file for this to avoid use of architecture #ifdefs in efi-stub-helper.c. Signed-off-by: Roy Franz [ Fixed memory node deletion code. ] Signed-off-by: Mark Rutland Signed-off-by: Leif Lindholm Acked-by: Grant Likely Acked-by: Catalin Marinas Signed-off-by: Matt Fleming --- drivers/firmware/efi/fdt.c | 285 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/efi.h | 3 + 2 files changed, 288 insertions(+) create mode 100644 drivers/firmware/efi/fdt.c (limited to 'include/linux') diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c new file mode 100644 index 000000000000..5c6a8e8a9580 --- /dev/null +++ b/drivers/firmware/efi/fdt.c @@ -0,0 +1,285 @@ +/* + * FDT related Helper functions used by the EFI stub on multiple + * architectures. This should be #included by the EFI stub + * implementation files. + * + * Copyright 2013 Linaro Limited; author Roy Franz + * + * This file is part of the Linux kernel, and is made available + * under the terms of the GNU General Public License version 2. 
+ * + */ + +static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, + unsigned long orig_fdt_size, + void *fdt, int new_fdt_size, char *cmdline_ptr, + u64 initrd_addr, u64 initrd_size, + efi_memory_desc_t *memory_map, + unsigned long map_size, unsigned long desc_size, + u32 desc_ver) +{ + int node, prev; + int status; + u32 fdt_val32; + u64 fdt_val64; + + /* + * Copy definition of linux_banner here. Since this code is + * built as part of the decompressor for ARM v7, pulling + * in version.c where linux_banner is defined for the + * kernel brings other kernel dependencies with it. + */ + const char linux_banner[] = + "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" + LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; + + /* Do some checks on provided FDT, if it exists*/ + if (orig_fdt) { + if (fdt_check_header(orig_fdt)) { + pr_efi_err(sys_table, "Device Tree header not valid!\n"); + return EFI_LOAD_ERROR; + } + /* + * We don't get the size of the FDT if we get if from a + * configuration table. + */ + if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { + pr_efi_err(sys_table, "Truncated device tree! foo!\n"); + return EFI_LOAD_ERROR; + } + } + + if (orig_fdt) + status = fdt_open_into(orig_fdt, fdt, new_fdt_size); + else + status = fdt_create_empty_tree(fdt, new_fdt_size); + + if (status != 0) + goto fdt_set_fail; + + /* + * Delete any memory nodes present. We must delete nodes which + * early_init_dt_scan_memory may try to use. + */ + prev = 0; + for (;;) { + const char *type, *name; + int len; + + node = fdt_next_node(fdt, prev, NULL); + if (node < 0) + break; + + type = fdt_getprop(fdt, node, "device_type", &len); + if (type && strncmp(type, "memory", len) == 0) { + fdt_del_node(fdt, node); + continue; + } + + prev = node; + } + + node = fdt_subnode_offset(fdt, 0, "chosen"); + if (node < 0) { + node = fdt_add_subnode(fdt, 0, "chosen"); + if (node < 0) { + status = node; /* node is error code when negative */ + goto fdt_set_fail; + } + } + + if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) { + status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr, + strlen(cmdline_ptr) + 1); + if (status) + goto fdt_set_fail; + } + + /* Set initrd address/end in device tree, if present */ + if (initrd_size != 0) { + u64 initrd_image_end; + u64 initrd_image_start = cpu_to_fdt64(initrd_addr); + + status = fdt_setprop(fdt, node, "linux,initrd-start", + &initrd_image_start, sizeof(u64)); + if (status) + goto fdt_set_fail; + initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size); + status = fdt_setprop(fdt, node, "linux,initrd-end", + &initrd_image_end, sizeof(u64)); + if (status) + goto fdt_set_fail; + } + + /* Add FDT entries for EFI runtime services in chosen node. 
*/ + node = fdt_subnode_offset(fdt, 0, "chosen"); + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); + status = fdt_setprop(fdt, node, "linux,uefi-system-table", + &fdt_val64, sizeof(fdt_val64)); + if (status) + goto fdt_set_fail; + + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", + &fdt_val64, sizeof(fdt_val64)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(map_size); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(desc_size); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(desc_ver); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + /* + * Add kernel version banner so stub/kernel match can be + * verified. + */ + status = fdt_setprop_string(fdt, node, "linux,uefi-stub-kern-ver", + linux_banner); + if (status) + goto fdt_set_fail; + + return EFI_SUCCESS; + +fdt_set_fail: + if (status == -FDT_ERR_NOSPACE) + return EFI_BUFFER_TOO_SMALL; + + return EFI_LOAD_ERROR; +} + +#ifndef EFI_FDT_ALIGN +#define EFI_FDT_ALIGN EFI_PAGE_SIZE +#endif + +/* + * Allocate memory for a new FDT, then add EFI, commandline, and + * initrd related fields to the FDT. This routine increases the + * FDT allocation size until the allocated memory is large + * enough. EFI allocations are in EFI_PAGE_SIZE granules, + * which are fixed at 4K bytes, so in most cases the first + * allocation should succeed. + * EFI boot services are exited at the end of this function. + * There must be no allocations between the get_memory_map() + * call and the exit_boot_services() call, so the exiting of + * boot services is very tightly tied to the creation of the FDT + * with the final memory map in it. + */ + +efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, + void *handle, + unsigned long *new_fdt_addr, + unsigned long max_addr, + u64 initrd_addr, u64 initrd_size, + char *cmdline_ptr, + unsigned long fdt_addr, + unsigned long fdt_size) +{ + unsigned long map_size, desc_size; + u32 desc_ver; + unsigned long mmap_key; + efi_memory_desc_t *memory_map; + unsigned long new_fdt_size; + efi_status_t status; + + /* + * Estimate size of new FDT, and allocate memory for it. We + * will allocate a bigger buffer if this ends up being too + * small, so a rough guess is OK here. + */ + new_fdt_size = fdt_size + EFI_PAGE_SIZE; + while (1) { + status = efi_high_alloc(sys_table, new_fdt_size, EFI_FDT_ALIGN, + new_fdt_addr, max_addr); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); + goto fail; + } + + /* + * Now that we have done our final memory allocation (and free) + * we can get the memory map key needed for + * exit_boot_services(). + */ + status = efi_get_memory_map(sys_table, &memory_map, &map_size, + &desc_size, &desc_ver, &mmap_key); + if (status != EFI_SUCCESS) + goto fail_free_new_fdt; + + status = update_fdt(sys_table, + (void *)fdt_addr, fdt_size, + (void *)*new_fdt_addr, new_fdt_size, + cmdline_ptr, initrd_addr, initrd_size, + memory_map, map_size, desc_size, desc_ver); + + /* Succeeding the first time is the expected case. 
*/ + if (status == EFI_SUCCESS) + break; + + if (status == EFI_BUFFER_TOO_SMALL) { + /* + * We need to allocate more space for the new + * device tree, so free existing buffer that is + * too small. Also free memory map, as we will need + * to get new one that reflects the free/alloc we do + * on the device tree buffer. + */ + efi_free(sys_table, new_fdt_size, *new_fdt_addr); + sys_table->boottime->free_pool(memory_map); + new_fdt_size += EFI_PAGE_SIZE; + } else { + pr_efi_err(sys_table, "Unable to constuct new device tree.\n"); + goto fail_free_mmap; + } + } + + /* Now we are ready to exit_boot_services.*/ + status = sys_table->boottime->exit_boot_services(handle, mmap_key); + + + if (status == EFI_SUCCESS) + return status; + + pr_efi_err(sys_table, "Exit boot services failed.\n"); + +fail_free_mmap: + sys_table->boottime->free_pool(memory_map); + +fail_free_new_fdt: + efi_free(sys_table, new_fdt_size, *new_fdt_addr); + +fail: + return EFI_LOAD_ERROR; +} + +static void *get_fdt(efi_system_table_t *sys_table) +{ + efi_guid_t fdt_guid = DEVICE_TREE_GUID; + efi_config_table_t *tables; + void *fdt; + int i; + + tables = (efi_config_table_t *) sys_table->tables; + fdt = NULL; + + for (i = 0; i < sys_table->nr_tables; i++) + if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) { + fdt = (void *) tables[i].table; + break; + } + + return fdt; +} diff --git a/include/linux/efi.h b/include/linux/efi.h index cd0172e796cb..41bbf8ba4ba8 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -575,6 +575,9 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long si #define EFI_FILE_SYSTEM_GUID \ EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) +#define DEVICE_TREE_GUID \ + EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) + typedef struct { efi_guid_t guid; u64 table; -- cgit v1.2.3 From 774b514390b1eb8476bc759262790762bd1ef45a Mon Sep 17 00:00:00 2001 From: Maxime COQUELIN Date: Wed, 29 Jan 2014 17:24:07 +0100 Subject: clk: divider: Add round to closest divider In some cases, we want to be able to round the divider to the closest one, instead than rounding up. This patch adds a new CLK_DIVIDER_ROUND_CLOSEST flag to specify the divider has to round to closest div, keeping rounding up as de default behaviour. 
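
As a quick worked example of the difference, take a 100 MHz parent and a 30 MHz request: the exact divider is 3.33, so the default behaviour rounds the divider up to 4 and delivers 25 MHz, while CLK_DIVIDER_ROUND_CLOSEST picks 3 and delivers 33.3 MHz, which is nearer the request. A provider opts in through the new divider flag; a minimal sketch, with the clock names, register offset and field width invented for illustration:

	/* "base" and "lock" are assumed to exist in the registering driver */
	clk = clk_register_divider(NULL, "periph_div", "pll1_out", 0,
				   base + 0x14, 0, 4,
				   CLK_DIVIDER_ROUND_CLOSEST, &lock);
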
Signed-off-by: Maxime Coquelin Signed-off-by: Mike Turquette --- drivers/clk/clk-divider.c | 69 ++++++++++++++++++++++++++++++++++++++++++-- include/linux/clk-provider.h | 3 ++ 2 files changed, 70 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 4637697c139f..c57294563a98 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -43,6 +43,17 @@ static unsigned int _get_table_maxdiv(const struct clk_div_table *table) return maxdiv; } +static unsigned int _get_table_mindiv(const struct clk_div_table *table) +{ + unsigned int mindiv = UINT_MAX; + const struct clk_div_table *clkt; + + for (clkt = table; clkt->div; clkt++) + if (clkt->div < mindiv) + mindiv = clkt->div; + return mindiv; +} + static unsigned int _get_maxdiv(struct clk_divider *divider) { if (divider->flags & CLK_DIVIDER_ONE_BASED) @@ -162,6 +173,24 @@ static int _round_up_table(const struct clk_div_table *table, int div) return up; } +static int _round_down_table(const struct clk_div_table *table, int div) +{ + const struct clk_div_table *clkt; + int down = _get_table_mindiv(table); + + for (clkt = table; clkt->div; clkt++) { + if (clkt->div == div) + return clkt->div; + else if (clkt->div > div) + continue; + + if ((div - clkt->div) < (div - down)) + down = clkt->div; + } + + return down; +} + static int _div_round_up(struct clk_divider *divider, unsigned long parent_rate, unsigned long rate) { @@ -175,6 +204,42 @@ static int _div_round_up(struct clk_divider *divider, return div; } +static int _div_round_closest(struct clk_divider *divider, + unsigned long parent_rate, unsigned long rate) +{ + int up, down, div; + + up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); + + if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) { + up = __roundup_pow_of_two(div); + down = __rounddown_pow_of_two(div); + } else if (divider->table) { + up = _round_up_table(divider->table, div); + down = _round_down_table(divider->table, div); + } + + return (up - div) <= (div - down) ? up : down; +} + +static int _div_round(struct clk_divider *divider, unsigned long parent_rate, + unsigned long rate) +{ + if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) + return _div_round_closest(divider, parent_rate, rate); + + return _div_round_up(divider, parent_rate, rate); +} + +static bool _is_best_div(struct clk_divider *divider, + int rate, int now, int best) +{ + if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) + return abs(rate - now) < abs(rate - best); + + return now <= rate && now > best; +} + static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { @@ -190,7 +255,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; - bestdiv = _div_round_up(divider, parent_rate, rate); + bestdiv = _div_round(divider, parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; return bestdiv; @@ -217,7 +282,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), MULT_ROUND_UP(rate, i)); now = DIV_ROUND_UP(parent_rate, i); - if (now <= rate && now > best) { + if (_is_best_div(divider, rate, now, best)) { bestdiv = i; best = now; *best_parent_rate = parent_rate; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 511917416fb0..59e2eb58f555 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -312,6 +312,8 @@ struct clk_div_table { * of this register, and mask of divider bits are in higher 16-bit of this * register. While setting the divider bits, higher 16-bit should also be * updated to indicate changing divider bits. + * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider to be rounded + * to the closest integer instead of the up one. */ struct clk_divider { struct clk_hw hw; @@ -327,6 +329,7 @@ struct clk_divider { #define CLK_DIVIDER_POWER_OF_TWO BIT(1) #define CLK_DIVIDER_ALLOW_ZERO BIT(2) #define CLK_DIVIDER_HIWORD_MASK BIT(3) +#define CLK_DIVIDER_ROUND_CLOSEST BIT(4) extern const struct clk_ops clk_divider_ops; struct clk *clk_register_divider(struct device *dev, const char *name, -- cgit v1.2.3 From c1b3156f121fd301191e0b4c5fa2fec42cd17871 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 28 Apr 2014 18:43:32 +0200 Subject: drbd: use blk_set_stacking_limits() ...instead directly assigning to q->limits.discard_zeroes_data Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_nl.c | 12 ++++++------ include/linux/drbd.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index a187c5b0da27..b4fc401b587f 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1114,15 +1114,18 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; + struct request_queue *b = NULL; if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + b = device->ldev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; rcu_read_unlock(); - put_ldev(device); + + blk_set_stacking_limits(&q->limits); + blk_queue_max_write_same_sectors(q, 0); } blk_queue_logical_block_size(q, 512); @@ -1131,14 +1134,11 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); - if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + if (b) { struct drbd_connection *connection = first_peer_device(device)->connection; if (blk_queue_discard(b) && (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { - /* inherit from backing queue */ - q->limits.discard_zeroes_data = 1; /* For now, don't allow more than one activity log extent worth of data * to be discarded in one go. 
We may need to rework drbd_al_begin_io() * to allow for even larger discard ranges */ diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 3dbe9bd57a09..fffd4b8563cb 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -52,7 +52,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.3" +#define REL_VERSION "8.4.4" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 -- cgit v1.2.3 From 02df6fe145715f1d3858c0c65aed991f148b70b4 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 28 Apr 2014 18:43:33 +0200 Subject: drbd: Test cstate while holding req_lock In case a connection transitions into C_TIMEOUT within the timer function (request_timer_fn()) we need to make sure that the receiver thread (potentially running on a different CPU) sees the updated cstate later on. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_nl.c | 3 ++- include/linux/drbd.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index b4fc401b587f..f4d3aff89aa1 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -442,12 +442,13 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) char *ex_to_string; int r; + spin_lock_irq(&connection->resource->req_lock); if (connection->cstate >= C_WF_REPORT_PARAMS) { drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); + spin_unlock_irq(&connection->resource->req_lock); return false; } - spin_lock_irq(&connection->resource->req_lock); connect_cnt = connection->connect_cnt; spin_unlock_irq(&connection->resource->req_lock); diff --git a/include/linux/drbd.h b/include/linux/drbd.h index fffd4b8563cb..3dbe9bd57a09 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -52,7 +52,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.4" +#define REL_VERSION "8.4.3" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 -- cgit v1.2.3 From 52c324f8a87b336496d0f5e9d8dff1aa32bb08cd Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 1 May 2014 00:13:47 +0200 Subject: cpuidle: Combine cpuidle_enabled() with cpuidle_select() Since both cpuidle_enabled() and cpuidle_select() are only called by cpuidle_idle_call(), it is not really useful to keep them separate and combining them will help to avoid complicating cpuidle_idle_call() even further if governors are changed to return error codes sometimes. This code modification shouldn't lead to any functional changes. Signed-off-by: Rafael J. 
Wysocki --- drivers/cpuidle/cpuidle.c | 26 ++++++-------------------- include/linux/cpuidle.h | 5 ----- kernel/sched/idle.c | 20 +++++++------------- 3 files changed, 13 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 8236746e46bb..f38359f64cc6 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -64,26 +64,6 @@ int cpuidle_play_dead(void) return -ENODEV; } -/** - * cpuidle_enabled - check if the cpuidle framework is ready - * @dev: cpuidle device for this cpu - * @drv: cpuidle driver for this cpu - * - * Return 0 on success, otherwise: - * -NODEV : the cpuidle framework is not available - * -EBUSY : the cpuidle framework is not initialized - */ -int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev) -{ - if (off || !initialized) - return -ENODEV; - - if (!drv || !dev || !dev->enabled) - return -EBUSY; - - return 0; -} - /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu @@ -138,6 +118,12 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, */ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { + if (off || !initialized) + return -ENODEV; + + if (!drv || !dev || !dev->enabled) + return -EBUSY; + return cpuidle_curr_governor->select(drv, dev); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index b0238cba440b..a8d5bd391a26 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -120,8 +120,6 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); -extern int cpuidle_enabled(struct cpuidle_driver *drv, - struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_enter(struct cpuidle_driver *drv, @@ -149,9 +147,6 @@ extern int cpuidle_play_dead(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } -static inline int cpuidle_enabled(struct cpuidle_driver *drv, - struct cpuidle_device *dev) -{return -ENODEV; } static inline int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8f4390a079c7..a8f12247ce7c 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -101,19 +101,13 @@ static int cpuidle_idle_call(void) rcu_idle_enter(); /* - * Check if the cpuidle framework is ready, otherwise fallback - * to the default arch specific idle method + * Ask the cpuidle framework to choose a convenient idle state. + * Fall back to the default arch specific idle method on errors. */ - ret = cpuidle_enabled(drv, dev); - - if (!ret) { - /* - * Ask the governor to choose an idle state it thinks - * it is convenient to go to. 
There is *always* a - * convenient idle state - */ - next_state = cpuidle_select(drv, dev); + next_state = cpuidle_select(drv, dev); + ret = next_state; + if (ret >= 0) { /* * The idle task must be scheduled, it is pointless to * go to idle, just update no idle residency and get @@ -140,7 +134,7 @@ static int cpuidle_idle_call(void) CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); - if (!ret) { + if (ret >= 0) { trace_cpu_idle_rcuidle(next_state, dev->cpu); /* @@ -175,7 +169,7 @@ static int cpuidle_idle_call(void) * We can't use the cpuidle framework, let's use the default * idle routine */ - if (ret) + if (ret < 0) arch_cpu_idle(); __current_set_polling(); -- cgit v1.2.3 From 034cd97ebda4062eb4402a6cf963ccd262caa86a Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 14 Apr 2014 15:28:35 +0200 Subject: PCI/MSI: Remove pci_enable_msi_block() There are no users of pci_enable_msi_block() function left. Obsolete it in favor of pci_enable_msi_range() and pci_enable_msi_exact() functions. Previously, we called arch_setup_msi_irqs() once, requesting the same vector count we passed to arch_msi_check_device(). Now we may call it several times: if it returns failure, we may retry and request fewer vectors. We don't keep track of the vector count we initially passed to arch_msi_check_device(). We only keep track of the number of vectors successfully set up by arch_setup_msi_irqs(), and this is what we use to clean things up when disabling MSI. Therefore, we assume that arch_msi_check_device() does nothing that will have to be cleaned up later. [bhelgaas: changelog] Signed-off-by: Alexander Gordeev Signed-off-by: Bjorn Helgaas --- drivers/pci/msi.c | 79 ++++++++++++++++++++++------------------------------- include/linux/pci.h | 5 +--- 2 files changed, 34 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 04130c3f9cf6..36dd0caa1759 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -879,50 +879,6 @@ int pci_msi_vec_count(struct pci_dev *dev) } EXPORT_SYMBOL(pci_msi_vec_count); -/** - * pci_enable_msi_block - configure device's MSI capability structure - * @dev: device to configure - * @nvec: number of interrupts to configure - * - * Allocate IRQs for a device with the MSI capability. - * This function returns a negative errno if an error occurs. If it - * is unable to allocate the number of interrupts requested, it returns - * the number of interrupts it might be able to allocate. If it successfully - * allocates at least the number of interrupts requested, it returns 0 and - * updates the @dev's irq member to the lowest new interrupt number; the - * other interrupt numbers allocated to this device are consecutive. 
- */ -int pci_enable_msi_block(struct pci_dev *dev, int nvec) -{ - int status, maxvec; - - if (dev->current_state != PCI_D0) - return -EINVAL; - - maxvec = pci_msi_vec_count(dev); - if (maxvec < 0) - return maxvec; - if (nvec > maxvec) - return maxvec; - - status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI); - if (status) - return status; - - WARN_ON(!!dev->msi_enabled); - - /* Check whether driver already requested MSI-X irqs */ - if (dev->msix_enabled) { - dev_info(&dev->dev, "can't enable MSI " - "(MSI-X already enabled)\n"); - return -EINVAL; - } - - status = msi_capability_init(dev, nvec); - return status; -} -EXPORT_SYMBOL(pci_enable_msi_block); - void pci_msi_shutdown(struct pci_dev *dev) { struct msi_desc *desc; @@ -1128,14 +1084,45 @@ void pci_msi_init_pci_dev(struct pci_dev *dev) **/ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) { - int nvec = maxvec; + int nvec; int rc; + if (dev->current_state != PCI_D0) + return -EINVAL; + + WARN_ON(!!dev->msi_enabled); + + /* Check whether driver already requested MSI-X irqs */ + if (dev->msix_enabled) { + dev_info(&dev->dev, + "can't enable MSI (MSI-X already enabled)\n"); + return -EINVAL; + } + if (maxvec < minvec) return -ERANGE; + nvec = pci_msi_vec_count(dev); + if (nvec < 0) + return nvec; + else if (nvec < minvec) + return -EINVAL; + else if (nvec > maxvec) + nvec = maxvec; + + do { + rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + do { - rc = pci_enable_msi_block(dev, nvec); + rc = msi_capability_init(dev, nvec); if (rc < 0) { return rc; } else if (rc > 0) { diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..499755e6dab5 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1158,7 +1158,6 @@ struct msix_entry { #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); -int pci_enable_msi_block(struct pci_dev *dev, int nvec); void pci_msi_shutdown(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); int pci_msix_vec_count(struct pci_dev *dev); @@ -1188,8 +1187,6 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, } #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } -static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) -{ return -ENOSYS; } static inline void pci_msi_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msi(struct pci_dev *dev) { } static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1244,7 +1241,7 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif -#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) +#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1) #ifdef CONFIG_HT_IRQ /* The functions a driver should call */ -- cgit v1.2.3 From ee3468739ed83d862dbbd90397aff5258f8f2c8e Mon Sep 17 00:00:00 2001 From: Brian W Hart Date: Thu, 1 May 2014 14:32:35 -0500 Subject: fbdev/fb.h: silence warning with -Wsign-compare MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Silence the warning when building with -Wsign-compare when fb.h is included: include/linux/fb.h: In function ‘__fb_pad_aligned_buffer’: include/linux/fb.h:650:17: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] for (j = 0; j < s_pitch; j++) ^ Signed-off-by: Brian W Hart Signed-off-by: Tomi 
Valkeinen --- include/linux/fb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fb.h b/include/linux/fb.h index 506242979eea..b6bfda99add3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -642,7 +642,7 @@ static inline void unlock_fb_info(struct fb_info *info) static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height) { - int i, j; + u32 i, j; d_pitch -= s_pitch; -- cgit v1.2.3 From 638b43b347216bab1a989b036a92eb7d9d9ee421 Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Wed, 5 Feb 2014 16:57:00 +0000 Subject: iio: Add TEMP_AMBIENT and TEMP_OBJECT channel modifiers useful for contactless temperature sensors to distinguish between the ambient temperature and the temperature of the object Signed-off-by: Peter Meerwald Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 2 ++ include/linux/iio/types.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index de8b1c2ed4b4..4b1f375c5659 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -85,6 +85,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_LIGHT_GREEN] = "green", [IIO_MOD_LIGHT_BLUE] = "blue", [IIO_MOD_QUATERNION] = "quaternion", + [IIO_MOD_TEMP_AMBIENT] = "ambient", + [IIO_MOD_TEMP_OBJECT] = "object", }; /* relies on pairs of these shared then separate */ diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 4fdab2e843b4..d480631eabc2 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -54,6 +54,8 @@ enum iio_modifier { IIO_MOD_LIGHT_GREEN, IIO_MOD_LIGHT_BLUE, IIO_MOD_QUATERNION, + IIO_MOD_TEMP_AMBIENT, + IIO_MOD_TEMP_OBJECT, }; enum iio_event_type { -- cgit v1.2.3 From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 7 Mar 2013 14:53:45 +0100 Subject: genirq: Sanitize spurious interrupt detection of threaded irqs Till reported that the spurious interrupt detection of threaded interrupts is broken in two ways: - note_interrupt() is called for each action thread of a shared interrupt line. That's wrong as we are only interested whether none of the device drivers felt responsible for the interrupt, but by calling multiple times for a single interrupt line we account IRQ_NONE even if one of the drivers felt responsible. - note_interrupt() when called from the thread handler is not serialized. That leaves the members of irq_desc which are used for the spurious detection unprotected. To solve this we need to defer the spurious detection of a threaded interrupt to the next hardware interrupt context where we have implicit serialization. If note_interrupt is called with action_ret == IRQ_WAKE_THREAD, we check whether the previous interrupt requested a deferred check. If not, we request a deferred check for the next hardware interrupt and return. If set, we check whether one of the interrupt threads signaled success. Depending on this information we feed the result into the spurious detector. If one primary handler of a shared interrupt returns IRQ_HANDLED we disable the deferred check of irq threads on the same line, as we have found at least one device driver who cared. 
Reported-by: Till Straumann Signed-off-by: Thomas Gleixner Tested-by: Austin Schuh Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Cc: Pavel Pisa Cc: Marc Kleine-Budde Cc: linux-can@vger.kernel.org Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos --- include/linux/irqdesc.h | 4 ++ kernel/irq/manage.c | 4 +- kernel/irq/spurious.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 108 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 26e2661d3935..472c021a2d4f 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -27,6 +27,8 @@ struct irq_desc; * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts + * @threads_handled: stats field for deferred spurious detection of threaded handlers + * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes @@ -52,6 +54,8 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; #ifdef CONFIG_SMP diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d34131ca372b..3dc6a61bf06a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -886,8 +886,8 @@ static int irq_thread(void *data) irq_thread_check_affinity(desc, action); action_ret = handler_fn(desc, action); - if (!noirqdebug) - note_interrupt(action->irq, desc, action_ret); + if (action_ret == IRQ_HANDLED) + atomic_inc(&desc->threads_handled); wake_threads_waitq(desc); } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index a1d8cc63b56e..e2514b0e439e 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, return action && (action->flags & IRQF_IRQPOLL); } +#define SPURIOUS_DEFERRED 0x80000000 + void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { @@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, irq_settings_is_polled(desc)) return; - /* we get here again via the threaded handler */ - if (action_ret == IRQ_WAKE_THREAD) - return; - if (bad_action_ret(action_ret)) { report_bad_irq(irq, desc, action_ret); return; } + /* + * We cannot call note_interrupt from the threaded handler + * because we need to look at the compound of all handlers + * (primary and threaded). Aside of that in the threaded + * shared case we have no serialization against an incoming + * hardware interrupt while we are dealing with a threaded + * result. + * + * So in case a thread is woken, we just note the fact and + * defer the analysis to the next hardware interrupt. + * + * The threaded handlers store whether they sucessfully + * handled an interrupt and we check whether that number + * changed versus the last invocation. + * + * We could handle all interrupts with the delayed by one + * mechanism, but for the non forced threaded case we'd just + * add pointless overhead to the straight hardirq interrupts + * for the sake of a few lines less code. 
+ */ + if (action_ret & IRQ_WAKE_THREAD) { + /* + * There is a thread woken. Check whether one of the + * shared primary handlers returned IRQ_HANDLED. If + * not we defer the spurious detection to the next + * interrupt. + */ + if (action_ret == IRQ_WAKE_THREAD) { + int handled; + /* + * We use bit 31 of thread_handled_last to + * denote the deferred spurious detection + * active. No locking necessary as + * thread_handled_last is only accessed here + * and we have the guarantee that hard + * interrupts are not reentrant. + */ + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { + desc->threads_handled_last |= SPURIOUS_DEFERRED; + return; + } + /* + * Check whether one of the threaded handlers + * returned IRQ_HANDLED since the last + * interrupt happened. + * + * For simplicity we just set bit 31, as it is + * set in threads_handled_last as well. So we + * avoid extra masking. And we really do not + * care about the high bits of the handled + * count. We just care about the count being + * different than the one we saw before. + */ + handled = atomic_read(&desc->threads_handled); + handled |= SPURIOUS_DEFERRED; + if (handled != desc->threads_handled_last) { + action_ret = IRQ_HANDLED; + /* + * Note: We keep the SPURIOUS_DEFERRED + * bit set. We are handling the + * previous invocation right now. + * Keep it for the current one, so the + * next hardware interrupt will + * account for it. + */ + desc->threads_handled_last = handled; + } else { + /* + * None of the threaded handlers felt + * responsible for the last interrupt + * + * We keep the SPURIOUS_DEFERRED bit + * set in threads_handled_last as we + * need to account for the current + * interrupt as well. + */ + action_ret = IRQ_NONE; + } + } else { + /* + * One of the primary handlers returned + * IRQ_HANDLED. So we don't care about the + * threaded handlers on the same line. Clear + * the deferred detection bit. + * + * In theory we could/should check whether the + * deferred bit is set and take the result of + * the previous run into account here as + * well. But it's really not worth the + * trouble. If every other interrupt is + * handled we never trigger the spurious + * detector. And if this is just the one out + * of 100k unhandled ones which is handled + * then we merily delay the spurious detection + * by one hard interrupt. Not a real problem. + */ + desc->threads_handled_last &= ~SPURIOUS_DEFERRED; + } + } + if (unlikely(action_ret == IRQ_NONE)) { /* * If we are seeing only the odd spurious IRQ caused by -- cgit v1.2.3 From d3ba720dd58cdf6630fee4b89482c465d5ad0d0f Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Tue, 8 Apr 2014 18:45:53 -0700 Subject: Drivers: hv: Eliminate the channel spinlock in the callback path By ensuring that we set the callback handler to NULL in the channel close path on the same CPU that the channel is bound to, we can eliminate this lock acquisition and release in a performance critical path. Signed-off-by: K. Y. 
Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel.c | 16 ++++++++++++---- drivers/hv/channel_mgmt.c | 11 +++++++---- drivers/hv/connection.c | 11 ++++------- include/linux/hyperv.h | 2 ++ 4 files changed, 25 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 602ca86a6488..740edec161bb 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -471,18 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) } EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl); +static void reset_channel_cb(void *arg) +{ + struct vmbus_channel *channel = arg; + + channel->onchannel_callback = NULL; +} + static void vmbus_close_internal(struct vmbus_channel *channel) { struct vmbus_channel_close_channel *msg; int ret; - unsigned long flags; channel->state = CHANNEL_OPEN_STATE; channel->sc_creation_callback = NULL; /* Stop callback and cancel the timer asap */ - spin_lock_irqsave(&channel->inbound_lock, flags); - channel->onchannel_callback = NULL; - spin_unlock_irqrestore(&channel->inbound_lock, flags); + if (channel->target_cpu != smp_processor_id()) + smp_call_function_single(channel->target_cpu, reset_channel_cb, + channel, true); + else + reset_channel_cb(channel); /* Send a closing message */ diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index fa920469bf10..6f7fdd9a7e77 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -365,7 +365,7 @@ static u32 next_vp; * performance critical channels (IDE, SCSI and Network) will be uniformly * distributed across all available CPUs. */ -static u32 get_vp_index(uuid_le *type_guid) +static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid) { u32 cur_cpu; int i; @@ -387,10 +387,13 @@ static u32 get_vp_index(uuid_le *type_guid) * Also if the channel is not a performance critical * channel, bind it to cpu 0. */ - return 0; + channel->target_cpu = 0; + channel->target_vp = 0; + return; } cur_cpu = (++next_vp % max_cpus); - return hv_context.vp_index[cur_cpu]; + channel->target_cpu = cur_cpu; + channel->target_vp = hv_context.vp_index[cur_cpu]; } /* @@ -438,7 +441,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) offer->connection_id; } - newchannel->target_vp = get_vp_index(&offer->offer.if_type); + init_vp_index(newchannel, &offer->offer.if_type); memcpy(&newchannel->offermsg, offer, sizeof(struct vmbus_channel_offer_channel)); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 2e7801af466e..df2363ea017f 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -277,7 +277,6 @@ struct vmbus_channel *relid2channel(u32 relid) static void process_chn_event(u32 relid) { struct vmbus_channel *channel; - unsigned long flags; void *arg; bool read_state; u32 bytes_to_read; @@ -296,13 +295,12 @@ static void process_chn_event(u32 relid) /* * A channel once created is persistent even when there * is no driver handling the device. An unloading driver - * sets the onchannel_callback to NULL under the - * protection of the channel inbound_lock. Thus, checking - * and invoking the driver specific callback takes care of - * orderly unloading of the driver. + * sets the onchannel_callback to NULL on the same CPU + * as where this interrupt is handled (in an interrupt context). + * Thus, checking and invoking the driver specific callback takes + * care of orderly unloading of the driver. 
*/ - spin_lock_irqsave(&channel->inbound_lock, flags); if (channel->onchannel_callback != NULL) { arg = channel->channel_callback_context; read_state = channel->batched_reading; @@ -327,7 +325,6 @@ static void process_chn_event(u32 relid) pr_err("no channel callback for relid - %u\n", relid); } - spin_unlock_irqrestore(&channel->inbound_lock, flags); } /* diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 2d7b4f139c32..a274e089df78 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -696,6 +696,8 @@ struct vmbus_channel { * preserve the earlier behavior. */ u32 target_vp; + /* The corresponding CPUID in the guest */ + u32 target_cpu; /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support -- cgit v1.2.3 From 3a28fa35d6658703cd26f9c16aaea0eae06afd40 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Tue, 8 Apr 2014 18:45:54 -0700 Subject: Drivers: hv: vmbus: Implement per-CPU mapping of relid to channel Currently the mapping of the relID to channel is done under the protection of a single spin lock. Starting with ws2012, each channel is bound to a specific VCPU in the guest. Use this binding to eliminate the spin lock by setting up per-cpu state for mapping relId to the channel. Signed-off-by: K. Y. Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel_mgmt.c | 41 ++++++++++++++++++++++++++++++++++++++++- drivers/hv/connection.c | 24 +++++++++++++++++++++++- drivers/hv/hv.c | 2 ++ drivers/hv/hyperv_vmbus.h | 5 +++++ include/linux/hyperv.h | 5 +++++ 5 files changed, 75 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6f7fdd9a7e77..6c8b032cacba 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void) spin_lock_init(&channel->sc_lock); INIT_LIST_HEAD(&channel->sc_list); + INIT_LIST_HEAD(&channel->percpu_list); channel->controlwq = create_workqueue("hv_vmbus_ctl"); if (!channel->controlwq) { @@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel) queue_work(vmbus_connection.work_queue, &channel->work); } +static void percpu_channel_enq(void *arg) +{ + struct vmbus_channel *channel = arg; + int cpu = smp_processor_id(); + + list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]); +} +static void percpu_channel_deq(void *arg) +{ + struct vmbus_channel *channel = arg; + + list_del(&channel->percpu_list); +} /* * vmbus_process_rescind_offer - @@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work) msg.header.msgtype = CHANNELMSG_RELID_RELEASED; vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); + if (channel->target_cpu != smp_processor_id()) + smp_call_function_single(channel->target_cpu, + percpu_channel_deq, channel, true); + else + percpu_channel_deq(channel); + if (channel->primary_channel == NULL) { spin_lock_irqsave(&vmbus_connection.channel_lock, flags); list_del(&channel->listentry); @@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work) work); struct vmbus_channel *channel; bool fnew = true; + bool enq = false; int ret; unsigned long flags; @@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work) } } - if (fnew) + if (fnew) { list_add_tail(&newchannel->listentry, &vmbus_connection.chn_list); + enq = true; + } 
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + if (enq) { + if (newchannel->target_cpu != smp_processor_id()) + smp_call_function_single(newchannel->target_cpu, + percpu_channel_enq, + newchannel, true); + else + percpu_channel_enq(newchannel); + } if (!fnew) { /* * Check to see if this is a sub-channel. @@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work) spin_lock_irqsave(&channel->sc_lock, flags); list_add_tail(&newchannel->sc_list, &channel->sc_list); spin_unlock_irqrestore(&channel->sc_lock, flags); + + if (newchannel->target_cpu != smp_processor_id()) + smp_call_function_single(newchannel->target_cpu, + percpu_channel_enq, + newchannel, true); + else + percpu_channel_enq(newchannel); + newchannel->state = CHANNEL_OPEN_STATE; if (channel->sc_creation_callback != NULL) channel->sc_creation_callback(newchannel); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index df2363ea017f..7f10c151632a 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -234,6 +234,28 @@ cleanup: return ret; } +/* + * Map the given relid to the corresponding channel based on the + * per-cpu list of channels that have been affinitized to this CPU. + * This will be used in the channel callback path as we can do this + * mapping in a lock-free fashion. + */ +static struct vmbus_channel *pcpu_relid2channel(u32 relid) +{ + struct vmbus_channel *channel; + struct vmbus_channel *found_channel = NULL; + int cpu = smp_processor_id(); + struct list_head *pcpu_head = &hv_context.percpu_list[cpu]; + + list_for_each_entry(channel, pcpu_head, percpu_list) { + if (channel->offermsg.child_relid == relid) { + found_channel = channel; + break; + } + } + + return found_channel; +} /* * relid2channel - Get the channel object given its @@ -285,7 +307,7 @@ static void process_chn_event(u32 relid) * Find the channel based on this relid and invokes the * channel callback to process the event */ - channel = relid2channel(relid); + channel = pcpu_relid2channel(relid); if (!channel) { pr_err("channel not found for relid - %u\n", relid); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index bcb49502c3bf..edfc8488cb03 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -383,6 +383,8 @@ void hv_synic_init(void *arg) */ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); hv_context.vp_index[cpu] = (u32)vp_index; + + INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); return; } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 860134da8039..18d1a8404cbc 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -510,6 +510,11 @@ struct hv_context { * basis. */ struct tasklet_struct *event_dpc[NR_CPUS]; + /* + * To optimize the mapping of relid to channel, maintain + * per-cpu list of the channels based on their CPU affinity. + */ + struct list_head percpu_list[NR_CPUS]; }; extern struct hv_context hv_context; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index a274e089df78..08cfaff8a072 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -734,6 +734,11 @@ struct vmbus_channel { * Support per-channel state for use by vmbus drivers. */ void *per_channel_state; + /* + * To support per-cpu lookup mapping of relid to channel, + * link up channels based on their CPU affinity. 
+ */ + struct list_head percpu_list; }; static inline void set_channel_read_state(struct vmbus_channel *c, bool state) -- cgit v1.2.3 From 425f3740cf8e93fac6318ed862bcc3081b818f0b Mon Sep 17 00:00:00 2001 From: Alan Date: Mon, 28 Apr 2014 20:47:36 +0100 Subject: goldfish: Add a 64bit write helper The base code imported from the Google tree is ifdef heaven. Prepare to fix this by adding a helper function. Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- include/linux/goldfish.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 include/linux/goldfish.h (limited to 'include/linux') diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h new file mode 100644 index 000000000000..9cc28902b54c --- /dev/null +++ b/include/linux/goldfish.h @@ -0,0 +1,15 @@ +#ifndef __LINUX_GOLDFISH_H +#define __LINUX_GOLDFISH_H + +/* Helpers for Goldfish virtual platform */ + +static inline void gf_write64(unsigned long data, + void __iomem *portl, void __iomem *porth) +{ + writel((u32)data, portl); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(data>>32, porth); +#endif +} + +#endif /* __LINUX_GOLDFISH_H */ -- cgit v1.2.3 From 5d02edfc3957446fd625c0b018e14c6631a791f4 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Sat, 19 Apr 2014 00:22:00 +0100 Subject: iio: hid-sensors: Convert units and exponent HID sensor hub specify a default unit and alternative units. This along with unit exponent can be used adjust scale. This change change HID sensor data units to IIO defined units for each sensor type. So in this way user space can use a simply use: "(data + offset) * scale" to get final result. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- .../iio/common/hid-sensors/hid-sensor-attributes.c | 114 +++++++++++++++++++++ include/linux/hid-sensor-hub.h | 4 + 2 files changed, 118 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index 75b54730a963..e61b1faa1e06 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -26,6 +26,40 @@ #include #include +struct { + u32 usage_id; + int unit; /* 0 for default others from HID sensor spec */ + int scale_val0; /* scale, whole number */ + int scale_val1; /* scale, fraction in micros */ +} static unit_conversion[] = { + {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, + {HID_USAGE_SENSOR_ACCEL_3D, + HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, + {HID_USAGE_SENSOR_ACCEL_3D, + HID_USAGE_SENSOR_UNITS_G, 9, 806650}, + + {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_GYRO_3D, + HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, + {HID_USAGE_SENSOR_GYRO_3D, + HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, + + {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, + {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, + + {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_INCLINOMETER_3D, + HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, + {HID_USAGE_SENSOR_INCLINOMETER_3D, + HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, + + {HID_USAGE_SENSOR_ALS, 0, 1, 0}, + {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, + + {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0}, + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0}, +}; + static int pow_10(unsigned power) { int i; @@ -209,6 +243,86 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, } 
EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); +/* + * This fuction applies the unit exponent to the scale. + * For example: + * 9.806650 ->exp:2-> val0[980]val1[665000] + * 9.000806 ->exp:2-> val0[900]val1[80600] + * 0.174535 ->exp:2-> val0[17]val1[453500] + * 1.001745 ->exp:0-> val0[1]val1[1745] + * 1.001745 ->exp:2-> val0[100]val1[174500] + * 1.001745 ->exp:4-> val0[10017]val1[450000] + * 9.806650 ->exp:-2-> val0[0]val1[98066] + */ +static void adjust_exponent_micro(int *val0, int *val1, int scale0, + int scale1, int exp) +{ + int i; + int x; + int res; + int rem; + + if (exp > 0) { + *val0 = scale0 * pow_10(exp); + res = 0; + if (exp > 6) { + *val1 = 0; + return; + } + for (i = 0; i < exp; ++i) { + x = scale1 / pow_10(5 - i); + res += (pow_10(exp - 1 - i) * x); + scale1 = scale1 % pow_10(5 - i); + } + *val0 += res; + *val1 = scale1 * pow_10(exp); + } else if (exp < 0) { + exp = abs(exp); + if (exp > 6) { + *val0 = *val1 = 0; + return; + } + *val0 = scale0 / pow_10(exp); + rem = scale0 % pow_10(exp); + res = 0; + for (i = 0; i < (6 - exp); ++i) { + x = scale1 / pow_10(5 - i); + res += (pow_10(5 - exp - i) * x); + scale1 = scale1 % pow_10(5 - i); + } + *val1 = rem * pow_10(6 - exp) + res; + } else { + *val0 = scale0; + *val1 = scale1; + } +} + +int hid_sensor_format_scale(u32 usage_id, + struct hid_sensor_hub_attribute_info *attr_info, + int *val0, int *val1) +{ + int i; + int exp; + + *val0 = 1; + *val1 = 0; + + for (i = 0; ARRAY_SIZE(unit_conversion); ++i) { + if (unit_conversion[i].usage_id == usage_id && + unit_conversion[i].unit == attr_info->units) { + exp = hid_sensor_convert_exponent( + attr_info->unit_expo); + adjust_exponent_micro(val0, val1, + unit_conversion[i].scale_val0, + unit_conversion[i].scale_val1, exp); + break; + } + } + + return IIO_VAL_INT_PLUS_MICRO; +} +EXPORT_SYMBOL(hid_sensor_format_scale); + int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, u32 usage_id, struct hid_sensor_common *st) diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index b70cfd7ff29c..89626b23c246 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -223,4 +223,8 @@ int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev, u32 report_id, int field_index, u32 usage_id); +int hid_sensor_format_scale(u32 usage_id, + struct hid_sensor_hub_attribute_info *attr_info, + int *val0, int *val1); + #endif -- cgit v1.2.3 From 9030924510a9e7d4b7d218749533840075879f2f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Sat, 19 Apr 2014 00:22:00 +0100 Subject: iio: hid-sensors: Add api to get poll value Added interface to get poll value in milli-seconds. This value is changed by changing sampling frequency. This API allows clients to wait for at least some poll milli seconds before reading a new sample. 
Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- .../iio/common/hid-sensors/hid-sensor-attributes.c | 20 ++++++++++++++++++++ include/linux/hid-sensor-hub.h | 2 ++ 2 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index e61b1faa1e06..372964635ccf 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -147,6 +147,26 @@ static u32 convert_to_vtf_format(int size, int exp, int val1, int val2) return value; } +s32 hid_sensor_read_poll_value(struct hid_sensor_common *st) +{ + s32 value = 0; + int ret; + + ret = sensor_hub_get_feature(st->hsdev, + st->poll.report_id, + st->poll.index, &value); + + if (ret < 0 || value < 0) { + return -EINVAL; + } else { + if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND) + value = value * 1000; + } + + return value; +} +EXPORT_SYMBOL(hid_sensor_read_poll_value); + int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, int *val1, int *val2) { diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 89626b23c246..88d8d636a68f 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -227,4 +227,6 @@ int hid_sensor_format_scale(u32 usage_id, struct hid_sensor_hub_attribute_info *attr_info, int *val0, int *val1); +s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); + #endif -- cgit v1.2.3 From 56ff6be608659ac06d4e3cc5827476efa29d610f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Sat, 19 Apr 2014 00:22:00 +0100 Subject: iio: hid-sensors: Add API to power on/off Added an API to allow client drivers to turn ON and OFF sensors for quick read. Added data_read as counting varaible instead of boolean, so that sensor is powered off only when last user released it. 
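As a usage sketch (illustrative only, not part of this patch), a client driver doing a quick read could bracket the access with the new helper; because data_ready is now a count, the sensor is powered down only when the last user drops its reference. Here common_attributes is assumed to be the driver's embedded struct hid_sensor_common:

	int ret;

	ret = hid_sensor_power_state(&state->common_attributes, true);
	if (ret < 0)
		return ret;

	/* ...issue the feature/input report request and read the value... */

	hid_sensor_power_state(&state->common_attributes, false);
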
Signed-off-by: Srinivas Pandruvada Signed-off-by: Jonathan Cameron --- drivers/iio/accel/hid-sensor-accel-3d.c | 7 +++---- drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 17 +++++++++++++---- drivers/iio/common/hid-sensors/hid-sensor-trigger.h | 1 + drivers/iio/gyro/hid-sensor-gyro-3d.c | 7 +++---- drivers/iio/light/hid-sensor-als.c | 7 +++---- drivers/iio/light/hid-sensor-prox.c | 7 +++---- drivers/iio/magnetometer/hid-sensor-magn-3d.c | 7 +++---- drivers/iio/orientation/hid-sensor-incl-3d.c | 7 +++---- drivers/iio/orientation/hid-sensor-rotation.c | 8 +++----- drivers/iio/pressure/hid-sensor-press.c | 7 +++---- include/linux/hid-sensor-hub.h | 2 +- 11 files changed, 39 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index ca50a91752d8..cf61c87a47e9 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -201,9 +201,8 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct accel_3d_state *accel_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "accel_3d_proc_event [%d]\n", - accel_state->common_attributes.data_ready); - if (accel_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "accel_3d_proc_event\n"); + if (atomic_read(&accel_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, accel_state->accel_val, sizeof(accel_state->accel_val)); @@ -342,7 +341,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - accel_state->common_attributes.data_ready = false; + atomic_set(&accel_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &accel_state->common_attributes); if (ret < 0) { diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index dbefbdaf7cd1..73282cee0c81 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -28,16 +28,17 @@ #include #include "hid-sensor-trigger.h" -static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, - bool state) +int hid_sensor_power_state(struct hid_sensor_common *st, bool state) { - struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); int state_val; int report_val; if (state) { if (sensor_hub_device_open(st->hsdev)) return -EIO; + + atomic_inc(&st->data_ready); + state_val = hid_sensor_get_usage_index(st->hsdev, st->power_state.report_id, st->power_state.index, @@ -47,6 +48,8 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, st->report_state.index, HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); } else { + if (!atomic_dec_and_test(&st->data_ready)) + return 0; sensor_hub_device_close(st->hsdev); state_val = hid_sensor_get_usage_index(st->hsdev, st->power_state.report_id, @@ -57,7 +60,6 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, st->report_state.index, HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM); } - st->data_ready = state; if (state_val >= 0) { state_val += st->power_state.logical_minimum; @@ -75,6 +77,13 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, return 0; } +EXPORT_SYMBOL(hid_sensor_power_state); + +static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, + 
bool state) +{ + return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state); +} void hid_sensor_remove_trigger(struct hid_sensor_common *attrb) { diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h index ca02f7811aa8..0f8e78c249d3 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h @@ -22,5 +22,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, struct hid_sensor_common *attrb); void hid_sensor_remove_trigger(struct hid_sensor_common *attrb); +int hid_sensor_power_state(struct hid_sensor_common *st, bool state); #endif diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index 53ac06040fbe..392c30b8cd74 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -201,9 +201,8 @@ static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct gyro_3d_state *gyro_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "gyro_3d_proc_event [%d]\n", - gyro_state->common_attributes.data_ready); - if (gyro_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "gyro_3d_proc_event\n"); + if (atomic_read(&gyro_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, gyro_state->gyro_val, sizeof(gyro_state->gyro_val)); @@ -339,7 +338,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - gyro_state->common_attributes.data_ready = false; + atomic_set(&gyro_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &gyro_state->common_attributes); if (ret < 0) { diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 39b50be9d456..e124b395f320 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -180,9 +180,8 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct als_state *als_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "als_proc_event [%d]\n", - als_state->common_attributes.data_ready); - if (als_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "als_proc_event\n"); + if (atomic_read(&als_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, &als_state->illum, sizeof(als_state->illum)); @@ -305,7 +304,7 @@ static int hid_als_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - als_state->common_attributes.data_ready = false; + atomic_set(&als_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &als_state->common_attributes); if (ret < 0) { diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 1894ab196f97..07e98ec8e9f1 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -176,9 +176,8 @@ static int prox_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct prox_state *prox_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "prox_proc_event [%d]\n", - prox_state->common_attributes.data_ready); - if (prox_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "prox_proc_event\n"); + if 
(atomic_read(&prox_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, &prox_state->human_presence, sizeof(prox_state->human_presence)); @@ -297,7 +296,7 @@ static int hid_prox_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - prox_state->common_attributes.data_ready = false; + atomic_set(&prox_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &prox_state->common_attributes); if (ret) { diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index 131ced0dcb1c..54eea6a17061 100644 --- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -202,9 +202,8 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct magn_3d_state *magn_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "magn_3d_proc_event [%d]\n", - magn_state->common_attributes.data_ready); - if (magn_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n"); + if (atomic_read(&magn_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, magn_state->magn_val, sizeof(magn_state->magn_val)); @@ -343,7 +342,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - magn_state->common_attributes.data_ready = false; + atomic_set(&magn_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &magn_state->common_attributes); if (ret < 0) { diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c index f0c465cc192a..bf11678dd04e 100644 --- a/drivers/iio/orientation/hid-sensor-incl-3d.c +++ b/drivers/iio/orientation/hid-sensor-incl-3d.c @@ -200,9 +200,8 @@ static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct incl_3d_state *incl_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "incl_3d_proc_event [%d]\n", - incl_state->common_attributes.data_ready); - if (incl_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "incl_3d_proc_event\n"); + if (atomic_read(&incl_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, (u8 *)incl_state->incl_val, sizeof(incl_state->incl_val)); @@ -358,7 +357,7 @@ static int hid_incl_3d_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - incl_state->common_attributes.data_ready = false; + atomic_set(&incl_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &incl_state->common_attributes); if (ret) { diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c index 51387bbc1ce1..dccf848e8b0f 100644 --- a/drivers/iio/orientation/hid-sensor-rotation.c +++ b/drivers/iio/orientation/hid-sensor-rotation.c @@ -145,10 +145,8 @@ static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct dev_rot_state *rot_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "dev_rot_proc_event [%d]\n", - rot_state->common_attributes.data_ready); - - if (rot_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "dev_rot_proc_event\n"); + if 
(atomic_read(&rot_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, (u8 *)rot_state->sampled_vals, sizeof(rot_state->sampled_vals)); @@ -272,7 +270,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); return ret; } - rot_state->common_attributes.data_ready = false; + atomic_set(&rot_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &rot_state->common_attributes); if (ret) { diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c index ff69da4443b8..39df50c45dab 100644 --- a/drivers/iio/pressure/hid-sensor-press.c +++ b/drivers/iio/pressure/hid-sensor-press.c @@ -180,9 +180,8 @@ static int press_proc_event(struct hid_sensor_hub_device *hsdev, struct iio_dev *indio_dev = platform_get_drvdata(priv); struct press_state *press_state = iio_priv(indio_dev); - dev_dbg(&indio_dev->dev, "press_proc_event [%d]\n", - press_state->common_attributes.data_ready); - if (press_state->common_attributes.data_ready) + dev_dbg(&indio_dev->dev, "press_proc_event\n"); + if (atomic_read(&press_state->common_attributes.data_ready)) hid_sensor_push_data(indio_dev, &press_state->press_data, sizeof(press_state->press_data)); @@ -307,7 +306,7 @@ static int hid_press_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to initialize trigger buffer\n"); goto error_free_dev_mem; } - press_state->common_attributes.data_ready = false; + atomic_set(&press_state->common_attributes.data_ready, 0); ret = hid_sensor_setup_trigger(indio_dev, name, &press_state->common_attributes); if (ret) { diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 88d8d636a68f..51f7ccadf923 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -189,7 +189,7 @@ struct hid_sensor_common { struct hid_sensor_hub_device *hsdev; struct platform_device *pdev; unsigned usage_id; - bool data_ready; + atomic_t data_ready; struct iio_trigger *trigger; struct hid_sensor_hub_attribute_info poll; struct hid_sensor_hub_attribute_info report_state; -- cgit v1.2.3 From 8c1eb25326552bfe6912ea160dfb3de0207a7550 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 11 Mar 2014 11:23:50 +0100 Subject: of: Spelling s/anonymouns/anonymous/ Signed-off-by: Geert Uytterhoeven Cc: Grant Likely Cc: devicetree@vger.kernel.org Signed-off-by: Jiri Kosina --- include/linux/of_platform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 05cb4a928252..8cdd53bf1114 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -37,7 +37,7 @@ * Note: Using an auxdata lookup table should be considered a last resort when * converting a platform to use the DT. Normally the automatically generated * device name will not matter, and drivers should obtain data from the device - * node instead of from an anonymouns platform_data pointer. + * node instead of from an anonymous platform_data pointer. */ struct of_dev_auxdata { char *compatible; -- cgit v1.2.3 From 719d93cd5f5c5c8775b7a38192069e8e1d1ac46e Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 16 Jan 2014 13:44:20 +0100 Subject: kvm/irqchip: Speed up KVM_SET_GSI_ROUTING When starting lots of dataplane devices the bootup takes very long on Christian's s390 with irqfd patches. With larger setups he is even able to trigger some timeouts in some components. 
Turns out that the KVM_SET_GSI_ROUTING ioctl takes very long (strace claims up to 0.1 sec) when having multiple CPUs. This is caused by the synchronize_rcu and the HZ=100 of s390. By changing the code to use a private srcu we can speed things up. This patch reduces the boot time till mounting root from 8 to 2 seconds on my s390 guest with 100 disks. Uses of hlist_for_each_entry_rcu, hlist_add_head_rcu, hlist_del_init_rcu are fine because they do not have lockdep checks (hlist_for_each_entry_rcu uses rcu_dereference_raw rather than rcu_dereference, and write-sides do not do rcu lockdep at all). Note that we're hardly relying on the "sleepable" part of srcu. We just want SRCU's faster detection of grace periods. Testing was done by Andrew Theurer using netperf tests STREAM, MAERTS and RR. The difference between results "before" and "after" the patch has mean -0.2% and standard deviation 0.6%. Using a paired t-test on the data points says that there is a 2.5% probability that the patch is the cause of the performance difference (rather than a random fluctuation). (Restricting the t-test to RR, which is the most likely to be affected, changes the numbers to respectively -0.3% mean, 0.7% stdev, and 8% probability that the numbers actually say something about the patch. The probability increases mostly because there are fewer data points). Cc: Marcelo Tosatti Cc: Michael S. Tsirkin Tested-by: Christian Borntraeger # s390 Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 1 + virt/kvm/eventfd.c | 25 +++++++++++++++---------- virt/kvm/irq_comm.c | 17 +++++++++-------- virt/kvm/irqchip.c | 31 ++++++++++++++++--------------- virt/kvm/kvm_main.c | 16 ++++++++++------ 5 files changed, 51 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1e125b055327..970c68197c69 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -370,6 +370,7 @@ struct kvm { struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots *memslots; struct srcu_struct srcu; + struct srcu_struct irq_srcu; #ifdef CONFIG_KVM_APIC_ARCHITECTURE u32 bsp_vcpu_id; #endif diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 912ec5a95e2c..20c3af7692c5 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "iodev.h" @@ -118,19 +119,22 @@ static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian) { struct _irqfd_resampler *resampler; + struct kvm *kvm; struct _irqfd *irqfd; + int idx; resampler = container_of(kian, struct _irqfd_resampler, notifier); + kvm = resampler->kvm; - kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, + kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, resampler->notifier.gsi, 0, false); - rcu_read_lock(); + idx = srcu_read_lock(&kvm->irq_srcu); list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link) eventfd_signal(irqfd->resamplefd, 1); - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); } static void @@ -142,7 +146,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd) mutex_lock(&kvm->irqfds.resampler_lock); list_del_rcu(&irqfd->resampler_link); - synchronize_rcu(); + synchronize_srcu(&kvm->irq_srcu); if (list_empty(&resampler->list)) { list_del(&resampler->link); @@ -221,17 +225,18 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) unsigned long flags = (unsigned long)key; struct 
kvm_kernel_irq_routing_entry *irq; struct kvm *kvm = irqfd->kvm; + int idx; if (flags & POLLIN) { - rcu_read_lock(); - irq = rcu_dereference(irqfd->irq_entry); + idx = srcu_read_lock(&kvm->irq_srcu); + irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu); /* An event has been signaled, inject an interrupt */ if (irq) kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); else schedule_work(&irqfd->inject); - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); } if (flags & POLLHUP) { @@ -363,7 +368,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) } list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); - synchronize_rcu(); + synchronize_srcu(&kvm->irq_srcu); mutex_unlock(&kvm->irqfds.resampler_lock); } @@ -465,7 +470,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) * another thread calls kvm_irq_routing_update before * we flush workqueue below (we synchronize with * kvm_irq_routing_update using irqfds.lock). - * It is paired with synchronize_rcu done by caller + * It is paired with synchronize_srcu done by caller * of that function. */ rcu_assign_pointer(irqfd->irq_entry, NULL); @@ -524,7 +529,7 @@ kvm_irqfd_release(struct kvm *kvm) /* * Change irq_routing and irqfd. - * Caller must invoke synchronize_rcu afterwards. + * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards. */ void kvm_irq_routing_update(struct kvm *kvm, struct kvm_irq_routing_table *irq_rt) diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index e2e6b4473a96..ced4a542a031 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -163,6 +163,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level) struct kvm_kernel_irq_routing_entry *e; int ret = -EINVAL; struct kvm_irq_routing_table *irq_rt; + int idx; trace_kvm_set_irq(irq, level, irq_source_id); @@ -174,8 +175,8 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level) * Since there's no easy way to do this, we only support injecting MSI * which is limited to 1:1 GSI mapping. 
*/ - rcu_read_lock(); - irq_rt = rcu_dereference(kvm->irq_routing); + idx = srcu_read_lock(&kvm->irq_srcu); + irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); if (irq < irq_rt->nr_rt_entries) hlist_for_each_entry(e, &irq_rt->map[irq], link) { if (likely(e->type == KVM_IRQ_ROUTING_MSI)) @@ -184,7 +185,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level) ret = -EWOULDBLOCK; break; } - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); return ret; } @@ -253,22 +254,22 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, mutex_lock(&kvm->irq_lock); hlist_del_rcu(&kimn->link); mutex_unlock(&kvm->irq_lock); - synchronize_rcu(); + synchronize_srcu(&kvm->irq_srcu); } void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, bool mask) { struct kvm_irq_mask_notifier *kimn; - int gsi; + int idx, gsi; - rcu_read_lock(); - gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; + idx = srcu_read_lock(&kvm->irq_srcu); + gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin]; if (gsi != -1) hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link) if (kimn->irq == gsi) kimn->func(kimn, mask); - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); } int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index 20dc9e4a8f6c..b43c275775cd 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include "irq.h" @@ -33,19 +34,19 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; - int gsi; + int gsi, idx; - rcu_read_lock(); - gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; + idx = srcu_read_lock(&kvm->irq_srcu); + gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin]; if (gsi != -1) hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) { - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); return true; } - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); return false; } @@ -54,18 +55,18 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; - int gsi; + int gsi, idx; trace_kvm_ack_irq(irqchip, pin); - rcu_read_lock(); - gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; + idx = srcu_read_lock(&kvm->irq_srcu); + gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin]; if (gsi != -1) hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) kian->irq_acked(kian); - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); } void kvm_register_irq_ack_notifier(struct kvm *kvm, @@ -85,7 +86,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, mutex_lock(&kvm->irq_lock); hlist_del_init_rcu(&kian->link); mutex_unlock(&kvm->irq_lock); - synchronize_rcu(); + synchronize_srcu(&kvm->irq_srcu); #ifdef __KVM_HAVE_IOAPIC kvm_vcpu_request_scan_ioapic(kvm); #endif @@ -115,7 +116,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; - int ret = -1, i = 0; + int ret = -1, i = 0, idx; struct kvm_irq_routing_table *irq_rt; trace_kvm_set_irq(irq, level, irq_source_id); @@ -124,12 +125,12 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int 
level, * IOAPIC. So set the bit in both. The guest will ignore * writes to the unused one. */ - rcu_read_lock(); - irq_rt = rcu_dereference(kvm->irq_routing); + idx = srcu_read_lock(&kvm->irq_srcu); + irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); if (irq < irq_rt->nr_rt_entries) hlist_for_each_entry(e, &irq_rt->map[irq], link) irq_set[i++] = *e; - rcu_read_unlock(); + srcu_read_unlock(&kvm->irq_srcu, idx); while(i--) { int r; @@ -226,7 +227,7 @@ int kvm_set_irq_routing(struct kvm *kvm, kvm_irq_routing_update(kvm, new); mutex_unlock(&kvm->irq_lock); - synchronize_rcu(); + synchronize_srcu_expedited(&kvm->irq_srcu); new = old; r = 0; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index fa70c6e642b4..95b4c2b3906a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -457,11 +457,11 @@ static struct kvm *kvm_create_vm(unsigned long type) r = kvm_arch_init_vm(kvm, type); if (r) - goto out_err_nodisable; + goto out_err_no_disable; r = hardware_enable_all(); if (r) - goto out_err_nodisable; + goto out_err_no_disable; #ifdef CONFIG_HAVE_KVM_IRQCHIP INIT_HLIST_HEAD(&kvm->mask_notifier_list); @@ -473,10 +473,12 @@ static struct kvm *kvm_create_vm(unsigned long type) r = -ENOMEM; kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!kvm->memslots) - goto out_err_nosrcu; + goto out_err_no_srcu; kvm_init_memslots_id(kvm); if (init_srcu_struct(&kvm->srcu)) - goto out_err_nosrcu; + goto out_err_no_srcu; + if (init_srcu_struct(&kvm->irq_srcu)) + goto out_err_no_irq_srcu; for (i = 0; i < KVM_NR_BUSES; i++) { kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); @@ -505,10 +507,12 @@ static struct kvm *kvm_create_vm(unsigned long type) return kvm; out_err: + cleanup_srcu_struct(&kvm->irq_srcu); +out_err_no_irq_srcu: cleanup_srcu_struct(&kvm->srcu); -out_err_nosrcu: +out_err_no_srcu: hardware_disable_all(); -out_err_nodisable: +out_err_no_disable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); kfree(kvm->memslots); -- cgit v1.2.3 From 8febcaa2aac184d7e729acb75e9c4b80b04ad1b9 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 24 Apr 2014 11:30:01 -0400 Subject: device: introduce per device dma_pfn_offset On few architectures, there are few restrictions on DMAble area of system RAM. That also means that devices needs to know about this restrictions so that the dma_masks can be updated accordingly and dma address translation helpers can add/subtract the dma offset. In most of cases DMA addresses can be performed using offset value of Bus address space relatively to physical address space as following: PFN->DMA: __pfn_to_phys(pfn + [-]dma_pfn_offset) DMA->PFN: __phys_to_pfn(dma_addr) + [-]dma_pfn_offset So we introduce per device dma_pfn_offset which can be popullated by architecture init code while creating the devices. Cc: Greg Kroah-Hartman Cc: Russell King Cc: Arnd Bergmann Cc: Olof Johansson Cc: Grant Likely Cc: Catalin Marinas Cc: Linus Walleij Reviewed-by: Rob Herring Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar --- include/linux/device.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index 233bbbeb768d..85a52d698f78 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -691,6 +691,7 @@ struct acpi_dev_node { * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such descriptors. 
+ * @dma_pfn_offset: offset of DMA memory range relatively of RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. * @dma_pools: Dma pools (if dma'ble device). @@ -756,6 +757,7 @@ struct device { not all hardware supports 64 bit addresses for consistent allocations such descriptors. */ + unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; -- cgit v1.2.3 From 18308c94723e162ed121942335bc186e66820a7a Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Thu, 24 Apr 2014 11:30:02 -0400 Subject: of: introduce of_dma_get_range() helper The of_dma_get_range() allows to find "dma-range" property for the specified device and parse it. dma-ranges format: DMA addr (dma_addr) : naddr cells CPU addr (phys_addr_t) : pna cells size : nsize cells Cc: Greg Kroah-Hartman Cc: Russell King Cc: Arnd Bergmann Cc: Olof Johansson Cc: Grant Likely Cc: Catalin Marinas Cc: Linus Walleij Reviewed-by: Rob Herring Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar --- drivers/of/address.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/of_address.h | 8 +++++ 2 files changed, 95 insertions(+) (limited to 'include/linux') diff --git a/drivers/of/address.c b/drivers/of/address.c index cb4242a69cd5..c54baee87d93 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -721,3 +721,90 @@ void __iomem *of_iomap(struct device_node *np, int index) return ioremap(res.start, resource_size(&res)); } EXPORT_SYMBOL(of_iomap); + +/** + * of_dma_get_range - Get DMA range info + * @np: device node to get DMA range info + * @dma_addr: pointer to store initial DMA address of DMA range + * @paddr: pointer to store initial CPU address of DMA range + * @size: pointer to store size of DMA range + * + * Look in bottom up direction for the first "dma-ranges" property + * and parse it. + * dma-ranges format: + * DMA addr (dma_addr) : naddr cells + * CPU addr (phys_addr_t) : pna cells + * size : nsize cells + * + * It returns -ENODEV if "dma-ranges" property was not found + * for this device in DT. 
+ */ +int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size) +{ + struct device_node *node = of_node_get(np); + const __be32 *ranges = NULL; + int len, naddr, nsize, pna; + int ret = 0; + u64 dmaaddr; + + if (!node) + return -EINVAL; + + while (1) { + naddr = of_n_addr_cells(node); + nsize = of_n_size_cells(node); + node = of_get_next_parent(node); + if (!node) + break; + + ranges = of_get_property(node, "dma-ranges", &len); + + /* Ignore empty ranges, they imply no translation required */ + if (ranges && len > 0) + break; + + /* + * At least empty ranges has to be defined for parent node if + * DMA is supported + */ + if (!ranges) + break; + } + + if (!ranges) { + pr_debug("%s: no dma-ranges found for node(%s)\n", + __func__, np->full_name); + ret = -ENODEV; + goto out; + } + + len /= sizeof(u32); + + pna = of_n_addr_cells(node); + + /* dma-ranges format: + * DMA addr : naddr cells + * CPU addr : pna cells + * size : nsize cells + */ + dmaaddr = of_read_number(ranges, naddr); + *paddr = of_translate_dma_address(np, ranges); + if (*paddr == OF_BAD_ADDR) { + pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n", + __func__, dma_addr, np->full_name); + ret = -EINVAL; + goto out; + } + *dma_addr = dmaaddr; + + *size = of_read_number(ranges + naddr + pna, nsize); + + pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", + *dma_addr, *paddr, *size); + +out: + of_node_put(node); + + return ret; +} +EXPORT_SYMBOL_GPL(of_dma_get_range); diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 5f6ed6b182b8..4d7b325af2ca 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -63,6 +63,8 @@ extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range_parser *parser, struct of_pci_range *range); +extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size); #else /* CONFIG_OF_ADDRESS */ static inline struct device_node *of_find_matching_node_by_address( struct device_node *from, @@ -90,6 +92,12 @@ static inline struct of_pci_range *of_pci_range_parser_one( { return NULL; } + +static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size) +{ + return -ENODEV; +} #endif /* CONFIG_OF_ADDRESS */ #ifdef CONFIG_OF -- cgit v1.2.3 From 92ea637edea36e58236e3124f199161da6f5c5de Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 24 Apr 2014 11:30:03 -0400 Subject: of: introduce of_dma_is_coherent() helper The of_dma_is_coherent() helper parses the given DT device node to see if the "dma-coherent" property is supported and returns true or false accordingly. If the arch is always coherent or always noncoherent, then the default DMA ops has to be specified accordingly. 
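For illustration, bus or platform setup code consuming these two helpers together with the new per-device dma_pfn_offset field might look roughly like the sketch below (the function name example_of_dma_setup is made up; the of_platform patch at the end of this series is the real consumer of this pattern):

    /* Sketch only: derive per-device DMA settings from DT properties.
     * Needs <linux/of_address.h>, <linux/pfn.h> and <linux/device.h>. */
    static void example_of_dma_setup(struct device *dev)
    {
            u64 dma_addr, paddr, size;

            /* "dma-coherent" found on the node or one of its parents? */
            if (of_dma_is_coherent(dev->of_node))
                    dev_dbg(dev, "device is dma coherent\n");

            /* "dma-ranges" gives DMA address, CPU address and window size */
            if (of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size) == 0)
                    dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
    }
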
Cc: Greg Kroah-Hartman Cc: Russell King Cc: Arnd Bergmann Cc: Olof Johansson Cc: Grant Likely Cc: Catalin Marinas Cc: Linus Walleij Reviewed-by: Rob Herring Signed-off-by: Santosh Shilimkar Signed-off-by: Grygorii Strashko --- drivers/of/address.c | 23 +++++++++++++++++++++++ include/linux/of_address.h | 6 ++++++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/drivers/of/address.c b/drivers/of/address.c index c54baee87d93..d244b2859aac 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -808,3 +808,26 @@ out: return ret; } EXPORT_SYMBOL_GPL(of_dma_get_range); + +/** + * of_dma_is_coherent - Check if device is coherent + * @np: device node + * + * It returns true if "dma-coherent" property was found + * for this device in DT. + */ +bool of_dma_is_coherent(struct device_node *np) +{ + struct device_node *node = of_node_get(np); + + while (node) { + if (of_property_read_bool(node, "dma-coherent")) { + of_node_put(node); + return true; + } + node = of_get_next_parent(node); + } + of_node_put(node); + return false; +} +EXPORT_SYMBOL_GPL(of_dma_is_coherent); \ No newline at end of file diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 4d7b325af2ca..839a3521b28e 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -65,6 +65,7 @@ extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range *range); extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size); +extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ static inline struct device_node *of_find_matching_node_by_address( struct device_node *from, @@ -98,6 +99,11 @@ static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr, { return -ENODEV; } + +static inline bool of_dma_is_coherent(struct device_node *np) +{ + return false; +} #endif /* CONFIG_OF_ADDRESS */ #ifdef CONFIG_OF -- cgit v1.2.3 From 95713978b0a2929b72933235bb07c0a793e71afa Mon Sep 17 00:00:00 2001 From: Emilio López Date: Fri, 2 May 2014 17:57:16 +0200 Subject: clk: sunxi: Implement MMC phase control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HdG: add header exporting clk_sunxi_mmc_phase_control Signed-off-by: Emilio López Signed-off-by: Hans de Goede Signed-off-by: Mike Turquette --- drivers/clk/sunxi/clk-sunxi.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/clk/sunxi.h | 22 ++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 include/linux/clk/sunxi.h (limited to 'include/linux') diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index bd7dc733c1ca..59f90401b900 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -506,6 +506,42 @@ CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk", +/** + * clk_sunxi_mmc_phase_control() - configures MMC clock phase control + */ + +void clk_sunxi_mmc_phase_control(struct clk_hw *hw, u8 sample, u8 output) +{ + #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw) + #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw) + + struct clk_composite *composite = to_clk_composite(hw); + struct clk_hw *rate_hw = composite->rate_hw; + struct clk_factors *factors = to_clk_factors(rate_hw); + unsigned long flags = 0; + u32 reg; + + if (factors->lock) + spin_lock_irqsave(factors->lock, flags); + + reg = readl(factors->reg); + + /* set sample clock phase control */ + reg &= ~(0x7 << 20); + reg |= ((sample & 
0x7) << 20); + + /* set output clock phase control */ + reg &= ~(0x7 << 8); + reg |= ((output & 0x7) << 8); + + writel(reg, factors->reg); + + if (factors->lock) + spin_unlock_irqrestore(factors->lock, flags); +} +EXPORT_SYMBOL(clk_sunxi_mmc_phase_control); + + /** * sunxi_factors_clk_setup() - Setup function for factor clocks */ diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h new file mode 100644 index 000000000000..1ef5c899e458 --- /dev/null +++ b/include/linux/clk/sunxi.h @@ -0,0 +1,22 @@ +/* + * Copyright 2013 - Hans de Goede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_CLK_SUNXI_H_ +#define __LINUX_CLK_SUNXI_H_ + +#include + +void clk_sunxi_mmc_phase_control(struct clk_hw *hw, u8 sample, u8 output); + +#endif -- cgit v1.2.3 From ca654dc3a93d3b47dddc0c24a98043060bbb256b Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Mon, 5 May 2014 12:52:39 +0530 Subject: cpufreq: Catch double invocations of cpufreq_freq_transition_begin/end Some cpufreq drivers were redundantly invoking the _begin() and _end() APIs around frequency transitions, and this double invocation (one from the cpufreq core and the other from the cpufreq driver) used to result in a self-deadlock, leading to system hangs during boot. (The _begin() API makes contending callers wait until the previous invocation is complete. Hence, the cpufreq driver would end up waiting on itself!). Now all such drivers have been fixed, but debugging this issue was not very straight-forward (even lockdep didn't catch this). So let us add a debug infrastructure to the cpufreq core to catch such issues more easily in the future. We add a new field called 'transition_task' to the policy structure, to keep track of the task which is performing the frequency transition. Using this field, we make note of this task during _begin() and print a warning if we find a case where the same task is calling _begin() again, before completing the previous frequency transition using the corresponding _end(). We have left out ASYNC_NOTIFICATION drivers from this debug infrastructure for 2 reasons: 1. At the moment, we have no way to avoid a particular scenario where this debug infrastructure can emit false-positive warnings for such drivers. The scenario is depicted below: Task A Task B /* 1st freq transition */ Invoke _begin() { ... ... } Change the frequency /* 2nd freq transition */ Invoke _begin() { ... //waiting for B to ... //finish _end() for ... //the 1st transition ... | Got interrupt for successful ... | change of frequency (1st one). ... | ... | /* 1st freq transition */ ... | Invoke _end() { ... | ... ... V } ... ... } This scenario is actually deadlock-free because, once Task A changes the frequency, it is Task B's responsibility to invoke the corresponding _end() for the 1st frequency transition. Hence it is perfectly legal for Task A to go ahead and attempt another frequency transition in the meantime. (Of course it won't be able to proceed until Task B finishes the 1st _end(), but this doesn't cause a deadlock or a hang). 
The debug infrastructure cannot handle this scenario and will treat it as a deadlock and print a warning. To avoid this, we exclude such drivers from the purview of this code. 2. Luckily, we don't _need_ this infrastructure for ASYNC_NOTIFICATION drivers at all! The cpufreq core does not automatically invoke the _begin() and _end() APIs during frequency transitions in such drivers. Thus, the driver alone is responsible for invoking _begin()/_end() and hence there shouldn't be any conflicts which lead to double invocations. So, we can skip these drivers, since the probability that such drivers will hit this problem is extremely low, as outlined above. Signed-off-by: Srivatsa S. Bhat Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 14 ++++++++++++++ include/linux/cpufreq.h | 1 + 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a517da996aaf..bfe82b63875f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -365,6 +365,18 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs) { + + /* + * Catch double invocations of _begin() which lead to self-deadlock. + * ASYNC_NOTIFICATION drivers are left out because the cpufreq core + * doesn't invoke _begin() on their behalf, and hence the chances of + * double invocations are very low. Moreover, there are scenarios + * where these checks can emit false-positive warnings in these + * drivers; so we avoid that by skipping them altogether. + */ + WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) + && current == policy->transition_task); + wait: wait_event(policy->transition_wait, !policy->transition_ongoing); @@ -376,6 +388,7 @@ wait: } policy->transition_ongoing = true; + policy->transition_task = current; spin_unlock(&policy->transition_lock); @@ -392,6 +405,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, cpufreq_notify_post_transition(policy, freqs, transition_failed); policy->transition_ongoing = false; + policy->transition_task = NULL; wake_up(&policy->transition_wait); } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 77a5fa191502..f3822f836e14 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -110,6 +110,7 @@ struct cpufreq_policy { bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; wait_queue_head_t transition_wait; + struct task_struct *transition_task; /* Task which is doing the transition */ }; /* Only for ACPI */ -- cgit v1.2.3 From a0dd7b79657bd6644b914d16ce7f23468c44a7b4 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Mon, 5 May 2014 08:33:50 -0500 Subject: PM / OPP: Move cpufreq specific OPP functions out of generic OPP library CPUFreq specific helper functions for OPP (Operating Performance Points) now use generic OPP functions that allow CPUFreq to be be moved back into CPUFreq framework. This allows for independent modifications or future enhancements as needed isolated to just CPUFreq framework alone. Here, we just move relevant code and documentation to make this part of CPUFreq infrastructure. Cc: Kevin Hilman Signed-off-by: Nishanth Menon Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- Documentation/cpu-freq/core.txt | 29 +++++++++++ Documentation/power/opp.txt | 40 ++------------- drivers/base/power/opp.c | 92 --------------------------------- drivers/cpufreq/Makefile | 2 + drivers/cpufreq/cpufreq_opp.c | 110 ++++++++++++++++++++++++++++++++++++++++ include/linux/cpufreq.h | 21 ++++++++ include/linux/pm_opp.h | 20 -------- 7 files changed, 167 insertions(+), 147 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_opp.c (limited to 'include/linux') diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt index 0060d76b445f..70933eadc308 100644 --- a/Documentation/cpu-freq/core.txt +++ b/Documentation/cpu-freq/core.txt @@ -20,6 +20,7 @@ Contents: --------- 1. CPUFreq core and interfaces 2. CPUFreq notifiers +3. CPUFreq Table Generation with Operating Performance Point (OPP) 1. General Information ======================= @@ -92,3 +93,31 @@ values: cpu - number of the affected CPU old - old frequency new - new frequency + +3. CPUFreq Table Generation with Operating Performance Point (OPP) +================================================================== +For details about OPP, see Documentation/power/opp.txt + +dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with + cpufreq_frequency_table_cpuinfo which is provided with the list of + frequencies that are available for operation. This function provides + a ready to use conversion routine to translate the OPP layer's internal + information about the available frequencies into a format readily + providable to cpufreq. + + WARNING: Do not use this function in interrupt context. + + Example: + soc_pm_init() + { + /* Do things */ + r = dev_pm_opp_init_cpufreq_table(dev, &freq_table); + if (!r) + cpufreq_frequency_table_cpuinfo(policy, freq_table); + /* Do other things */ + } + + NOTE: This function is available only if CONFIG_CPU_FREQ is enabled in + addition to CONFIG_PM_OPP. + +dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt index b8a907dc0169..a9adad828cdc 100644 --- a/Documentation/power/opp.txt +++ b/Documentation/power/opp.txt @@ -10,8 +10,7 @@ Contents 3. OPP Search Functions 4. OPP Availability Control Functions 5. OPP Data Retrieval Functions -6. Cpufreq Table Generation -7. Data Structures +6. Data Structures 1. Introduction =============== @@ -72,7 +71,6 @@ operations until that OPP could be re-enabled if possible. OPP library facilitates this concept in it's implementation. The following operational functions operate only on available opps: opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count -and dev_pm_opp_init_cpufreq_table dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then be used for dev_pm_opp_enable/disable functions to make an opp available as required. @@ -96,10 +94,9 @@ using RCU read locks. The opp_find_freq_{exact,ceil,floor}, opp_get_{voltage, freq, opp_count} fall into this category. opp_{add,enable,disable} are updaters which use mutex and implement it's own -RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses -mutex to implment RCU updater strategy. These functions should *NOT* be called -under RCU locks and other contexts that prevent blocking functions in RCU or -mutex operations from working. +RCU locking mechanisms. 
These functions should *NOT* be called under RCU locks +and other contexts that prevent blocking functions in RCU or mutex operations +from working. 2. Initial OPP List Registration ================================ @@ -311,34 +308,7 @@ dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device /* Do other things */ } -6. Cpufreq Table Generation -=========================== -dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with - cpufreq_frequency_table_cpuinfo which is provided with the list of - frequencies that are available for operation. This function provides - a ready to use conversion routine to translate the OPP layer's internal - information about the available frequencies into a format readily - providable to cpufreq. - - WARNING: Do not use this function in interrupt context. - - Example: - soc_pm_init() - { - /* Do things */ - r = dev_pm_opp_init_cpufreq_table(dev, &freq_table); - if (!r) - cpufreq_frequency_table_cpuinfo(policy, freq_table); - /* Do other things */ - } - - NOTE: This function is available only if CONFIG_CPU_FREQ is enabled in - addition to CONFIG_PM as power management feature is required to - dynamically scale voltage and frequency in a system. - -dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table - -7. Data Structures +6. Data Structures ================== Typically an SoC contains multiple voltage domains which are variable. Each domain is represented by a device pointer. The relationship to OPP can be diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 38b43bb20878..d9e376a6d19d 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -596,97 +595,6 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_disable); -#ifdef CONFIG_CPU_FREQ -/** - * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device - * @dev: device for which we do this operation - * @table: Cpufreq table returned back to caller - * - * Generate a cpufreq table for a provided device- this assumes that the - * opp list is already initialized and ready for usage. - * - * This function allocates required memory for the cpufreq table. It is - * expected that the caller does the required maintenance such as freeing - * the table as required. - * - * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM - * if no memory available for the operation (table is not populated), returns 0 - * if successful and table is populated. - * - * WARNING: It is important for the callers to ensure refreshing their copy of - * the table if any of the mentioned functions have been invoked in the interim. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Since we just use the regular accessor functions to access the internal data - * structures, we use RCU read lock inside this function. As a result, users of - * this function DONOT need to use explicit locks for invoking. - */ -int dev_pm_opp_init_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ - struct dev_pm_opp *opp; - struct cpufreq_frequency_table *freq_table = NULL; - int i, max_opps, ret = 0; - unsigned long rate; - - rcu_read_lock(); - - max_opps = dev_pm_opp_get_opp_count(dev); - if (max_opps <= 0) { - ret = max_opps ? 
max_opps : -ENODATA; - goto out; - } - - freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL); - if (!freq_table) { - ret = -ENOMEM; - goto out; - } - - for (i = 0, rate = 0; i < max_opps; i++, rate++) { - /* find next rate */ - opp = dev_pm_opp_find_freq_ceil(dev, &rate); - if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - goto out; - } - freq_table[i].driver_data = i; - freq_table[i].frequency = rate / 1000; - } - - freq_table[i].driver_data = i; - freq_table[i].frequency = CPUFREQ_TABLE_END; - - *table = &freq_table[0]; - -out: - rcu_read_unlock(); - if (ret) - kfree(freq_table); - - return ret; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); - -/** - * dev_pm_opp_free_cpufreq_table() - free the cpufreq table - * @dev: device for which we do this operation - * @table: table to free - * - * Free up the table allocated by dev_pm_opp_init_cpufreq_table - */ -void dev_pm_opp_free_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ - if (!table) - return; - - kfree(*table); - *table = NULL; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); -#endif /* CONFIG_CPU_FREQ */ - /** * dev_pm_opp_get_notifier() - find notifier_head of the device with opp * @dev: device pointer used to lookup device OPPs. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 0dbb963c1aef..738c8b7b17dc 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -1,5 +1,7 @@ # CPUfreq core obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o +obj-$(CONFIG_PM_OPP) += cpufreq_opp.o + # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c new file mode 100644 index 000000000000..c0c6f4a4eccf --- /dev/null +++ b/drivers/cpufreq/cpufreq_opp.c @@ -0,0 +1,110 @@ +/* + * Generic OPP helper interface for CPUFreq drivers + * + * Copyright (C) 2009-2014 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: Cpufreq table returned back to caller + * + * Generate a cpufreq table for a provided device- this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance such as freeing + * the table as required. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory available for the operation (table is not populated), returns 0 + * if successful and table is populated. + * + * WARNING: It is important for the callers to ensure refreshing their copy of + * the table if any of the mentioned functions have been invoked in the interim. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Since we just use the regular accessor functions to access the internal data + * structures, we use RCU read lock inside this function. As a result, users of + * this function DONOT need to use explicit locks for invoking. 
+ */ +int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + struct dev_pm_opp *opp; + struct cpufreq_frequency_table *freq_table = NULL; + int i, max_opps, ret = 0; + unsigned long rate; + + rcu_read_lock(); + + max_opps = dev_pm_opp_get_opp_count(dev); + if (max_opps <= 0) { + ret = max_opps ? max_opps : -ENODATA; + goto out; + } + + freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL); + if (!freq_table) { + ret = -ENOMEM; + goto out; + } + + for (i = 0, rate = 0; i < max_opps; i++, rate++) { + /* find next rate */ + opp = dev_pm_opp_find_freq_ceil(dev, &rate); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto out; + } + freq_table[i].driver_data = i; + freq_table[i].frequency = rate / 1000; + } + + freq_table[i].driver_data = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; + + *table = &freq_table[0]; + +out: + rcu_read_unlock(); + if (ret) + kfree(freq_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); + +/** + * dev_pm_opp_free_cpufreq_table() - free the cpufreq table + * @dev: device for which we do this operation + * @table: table to free + * + * Free up the table allocated by dev_pm_opp_init_cpufreq_table + */ +void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + if (!table) + return; + + kfree(*table); + *table = NULL; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f3822f836e14..9d803b529ac2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -469,6 +469,27 @@ struct cpufreq_frequency_table { * order */ }; +#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) +int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +#else +static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table + **table) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table + **table) +{ +} +#endif + + bool cpufreq_next_valid(struct cpufreq_frequency_table **pos); /* diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5151b0059585..0330217abfad 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -15,7 +15,6 @@ #define __LINUX_OPP_H__ #include -#include #include struct dev_pm_opp; @@ -117,23 +116,4 @@ static inline int of_init_opp_table(struct device *dev) } #endif -#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) -int dev_pm_opp_init_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table); -void dev_pm_opp_free_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table); -#else -static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ - return -EINVAL; -} - -static inline -void dev_pm_opp_free_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ -} -#endif /* CONFIG_CPU_FREQ */ - #endif /* __LINUX_OPP_H__ */ -- cgit v1.2.3 From a6220fc19afc07fe77cfd16f5b8e568615517091 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 5 May 2014 00:51:54 +0200 Subject: PM / suspend: Always use deepest C-state in the "freeze" sleep state If freeze_enter() is called, we want to bypass the current cpuidle governor and always use the deepest available (that is, not disabled) C-state, because we want to save as much energy as reasonably possible then and runtime latency constraints don't matter at that point, since the system is in a sleep state anyway. Signed-off-by: Rafael J. Wysocki Tested-by: Aubrey Li --- drivers/cpuidle/cpuidle.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/cpuidle.h | 2 ++ kernel/power/suspend.c | 2 ++ 3 files changed, 48 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index f38359f64cc6..cb7019977c50 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -32,6 +32,7 @@ LIST_HEAD(cpuidle_detected_devices); static int enabled_devices; static int off __read_mostly; static int initialized __read_mostly; +static bool use_deepest_state __read_mostly; int cpuidle_disabled(void) { @@ -64,6 +65,45 @@ int cpuidle_play_dead(void) return -ENODEV; } +/** + * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode. + * @enable: Whether enable or disable the feature. + * + * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and + * always use the state with the greatest exit latency (out of the states that + * are not disabled). + * + * This function can only be called after cpuidle_pause() to avoid races. + */ +void cpuidle_use_deepest_state(bool enable) +{ + use_deepest_state = enable; +} + +/** + * cpuidle_find_deepest_state - Find the state of the greatest exit latency. + * @drv: cpuidle driver for a given CPU. + * @dev: cpuidle device for a given CPU. 
+ */ +static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + unsigned int latency_req = 0; + int i, ret = CPUIDLE_DRIVER_STATE_START - 1; + + for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; + struct cpuidle_state_usage *su = &dev->states_usage[i]; + + if (s->disabled || su->disable || s->exit_latency <= latency_req) + continue; + + latency_req = s->exit_latency; + ret = i; + } + return ret; +} + /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu @@ -124,6 +164,9 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) if (!drv || !dev || !dev->enabled) return -EBUSY; + if (unlikely(use_deepest_state)) + return cpuidle_find_deepest_state(drv, dev); + return cpuidle_curr_governor->select(drv, dev); } @@ -155,7 +198,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ void cpuidle_reflect(struct cpuidle_device *dev, int index) { - if (cpuidle_curr_governor->reflect) + if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state)) cpuidle_curr_governor->reflect(dev, index); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index a8d5bd391a26..c51a436135c4 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -143,6 +143,7 @@ extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); +extern void cpuidle_use_deepest_state(bool enable); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else @@ -175,6 +176,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } +static inline void cpuidle_use_deepest_state(bool enable) {} static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } #endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8233cd4047d7..155721f7f909 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -54,9 +54,11 @@ static void freeze_begin(void) static void freeze_enter(void) { + cpuidle_use_deepest_state(true); cpuidle_resume(); wait_event(suspend_freeze_wait_head, suspend_freeze_wake); cpuidle_pause(); + cpuidle_use_deepest_state(false); } void freeze_wake(void) -- cgit v1.2.3 From 143e1e28cb40bed836b0a06567208bd7347c9672 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 11 Apr 2014 11:44:37 +0200 Subject: sched: Rework sched_domain topology definition We replace the old way to configure the scheduler topology with a new method which enables a platform to declare additionnal level (if needed). We still have a default topology table definition that can be used by platform that don't want more level than the SMT, MC, CPU and NUMA ones. This table can be overwritten by an arch which either wants to add new level where a load balance make sense like BOOK or powergating level or wants to change the flags configuration of some levels. For each level, we need a function pointer that returns cpumask for each cpu, a function pointer that returns the flags for the level and a name. Only flags that describe topology, can be set by an architecture. 
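Concretely, an architecture that needs an extra level (or different flags) declares its own table in this format and installs it with set_sched_topology(); the following is an illustrative sketch only (my_arch_topology is a made-up name, and cpu_book_mask stands for whatever per-cpu mask helper the architecture already provides):

    static struct sched_domain_topology_level my_arch_topology[] = {
    #ifdef CONFIG_SCHED_SMT
            { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
    #endif
    #ifdef CONFIG_SCHED_MC
            { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
    #endif
            { cpu_book_mask, SD_INIT_NAME(BOOK) }, /* arch-specific extra level */
            { cpu_cpu_mask, SD_INIT_NAME(DIE) },
            { NULL, },
    };

    /* typically called once from the arch's SMP/topology init code */
    set_sched_topology(my_arch_topology);
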
The current topology flags are: SD_SHARE_CPUPOWER SD_SHARE_PKG_RESOURCES SD_NUMA SD_ASYM_PACKING Then, each level must be a subset on the next one. The build sequence of the sched_domain will take care of removing useless levels like those with 1 CPU and those with the same CPU span and no more relevant information for load balancing than its children. Signed-off-by: Vincent Guittot Tested-by: Dietmar Eggemann Reviewed-by: Preeti U Murthy Reviewed-by: Dietmar Eggemann Signed-off-by: Peter Zijlstra Cc: Andrew Morton Cc: Benjamin Herrenschmidt Cc: Bjorn Helgaas Cc: Chris Metcalf Cc: Christoph Lameter Cc: David S. Miller Cc: Fenghua Yu Cc: Greg Kroah-Hartman Cc: Hanjun Guo Cc: Heiko Carstens Cc: Jason Low Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Tony Luck Cc: linux390@de.ibm.com Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Link: http://lkml.kernel.org/r/1397209481-28542-2-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/topology.h | 24 ---- arch/s390/include/asm/topology.h | 2 - arch/tile/include/asm/topology.h | 33 ------ include/linux/sched.h | 53 +++++++++ include/linux/topology.h | 128 +++------------------ kernel/sched/core.c | 233 ++++++++++++++++++++------------------- 6 files changed, 186 insertions(+), 287 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 5cb55a1e606b..3202aa74e0d6 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -46,30 +46,6 @@ void build_cpu_to_node_map(void); -#define SD_CPU_INIT (struct sched_domain) { \ - .parent = NULL, \ - .child = NULL, \ - .groups = NULL, \ - .min_interval = 1, \ - .max_interval = 4, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 2, \ - .busy_idx = 2, \ - .idle_idx = 1, \ - .newidle_idx = 0, \ - .wake_idx = 0, \ - .forkexec_idx = 0, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_NEWIDLE \ - | SD_BALANCE_EXEC \ - | SD_BALANCE_FORK \ - | SD_WAKE_AFFINE, \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .nr_balance_failed = 0, \ -} - #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 05425b18c0aa..07763bdb408d 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -64,8 +64,6 @@ static inline void s390_init_cpu_topology(void) }; #endif -#define SD_BOOK_INIT SD_CPU_INIT - #include #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h index d15c0d8d550f..938311844233 100644 --- a/arch/tile/include/asm/topology.h +++ b/arch/tile/include/asm/topology.h @@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node) /* For now, use numa node -1 for global allocation. */ #define pcibus_to_node(bus) ((void)(bus), -1) -/* - * TILE architecture has many cores integrated in one processor, so we need - * setup bigger balance_interval for both CPU/NODE scheduling domains to - * reduce process scheduling costs. 
- */ - -/* sched_domains SD_CPU_INIT for TILE architecture */ -#define SD_CPU_INIT (struct sched_domain) { \ - .min_interval = 4, \ - .max_interval = 128, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .idle_idx = 1, \ - .newidle_idx = 0, \ - .wake_idx = 0, \ - .forkexec_idx = 0, \ - \ - .flags = 1*SD_LOAD_BALANCE \ - | 1*SD_BALANCE_NEWIDLE \ - | 1*SD_BALANCE_EXEC \ - | 1*SD_BALANCE_FORK \ - | 0*SD_BALANCE_WAKE \ - | 0*SD_WAKE_AFFINE \ - | 0*SD_SHARE_CPUPOWER \ - | 0*SD_SHARE_PKG_RESOURCES \ - | 0*SD_SERIALIZE \ - , \ - .last_balance = jiffies, \ - .balance_interval = 32, \ -} - /* By definition, we create nodes based on online memory. */ #define node_has_online_mem(nid) 1 diff --git a/include/linux/sched.h b/include/linux/sched.h index 2a4298fb0d85..656b035c30e5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -879,6 +879,27 @@ enum cpu_idle_type { extern int __weak arch_sd_sibiling_asym_packing(void); +#ifdef CONFIG_SCHED_SMT +static inline const int cpu_smt_flags(void) +{ + return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline const int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline const int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + struct sched_domain_attr { int relax_domain_level; }; @@ -985,6 +1006,38 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef const int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_group **__percpu sg; + struct sched_group_power **__percpu sgp; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern struct sched_domain_topology_level *sched_domain_topology; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + #else /* CONFIG_SMP */ struct sched_domain_attr; diff --git a/include/linux/topology.h b/include/linux/topology.h index 7062330a1329..973671ff9e7d 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -66,121 +66,6 @@ int arch_update_cpu_topology(void); #define PENALTY_FOR_NODE_WITH_CPUS (1) #endif -/* - * Below are the 3 major initializers used in building sched_domains: - * SD_SIBLING_INIT, for SMT domains - * SD_CPU_INIT, for SMP domains - * - * Any architecture that cares to do any tuning to these values should do so - * by defining their own arch-specific initializer in include/asm/topology.h. - * A definition there will automagically override these default initializers - * and allow arch-specific performance tuning of sched_domains. - * (Only non-zero and non-null fields need be specified.) - */ - -#ifdef CONFIG_SCHED_SMT -/* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is, - * so can't we drop this in favor of CONFIG_SCHED_SMT? 
- */ -#define ARCH_HAS_SCHED_WAKE_IDLE -/* Common values for SMT siblings */ -#ifndef SD_SIBLING_INIT -#define SD_SIBLING_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 2, \ - .busy_factor = 64, \ - .imbalance_pct = 110, \ - \ - .flags = 1*SD_LOAD_BALANCE \ - | 1*SD_BALANCE_NEWIDLE \ - | 1*SD_BALANCE_EXEC \ - | 1*SD_BALANCE_FORK \ - | 0*SD_BALANCE_WAKE \ - | 1*SD_WAKE_AFFINE \ - | 1*SD_SHARE_CPUPOWER \ - | 1*SD_SHARE_PKG_RESOURCES \ - | 0*SD_SERIALIZE \ - | 0*SD_PREFER_SIBLING \ - | arch_sd_sibling_asym_packing() \ - , \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .smt_gain = 1178, /* 15% */ \ - .max_newidle_lb_cost = 0, \ - .next_decay_max_lb_cost = jiffies, \ -} -#endif -#endif /* CONFIG_SCHED_SMT */ - -#ifdef CONFIG_SCHED_MC -/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */ -#ifndef SD_MC_INIT -#define SD_MC_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 4, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .wake_idx = 0, \ - .forkexec_idx = 0, \ - \ - .flags = 1*SD_LOAD_BALANCE \ - | 1*SD_BALANCE_NEWIDLE \ - | 1*SD_BALANCE_EXEC \ - | 1*SD_BALANCE_FORK \ - | 0*SD_BALANCE_WAKE \ - | 1*SD_WAKE_AFFINE \ - | 0*SD_SHARE_CPUPOWER \ - | 1*SD_SHARE_PKG_RESOURCES \ - | 0*SD_SERIALIZE \ - , \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .max_newidle_lb_cost = 0, \ - .next_decay_max_lb_cost = jiffies, \ -} -#endif -#endif /* CONFIG_SCHED_MC */ - -/* Common values for CPUs */ -#ifndef SD_CPU_INIT -#define SD_CPU_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 4, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .idle_idx = 1, \ - .newidle_idx = 0, \ - .wake_idx = 0, \ - .forkexec_idx = 0, \ - \ - .flags = 1*SD_LOAD_BALANCE \ - | 1*SD_BALANCE_NEWIDLE \ - | 1*SD_BALANCE_EXEC \ - | 1*SD_BALANCE_FORK \ - | 0*SD_BALANCE_WAKE \ - | 1*SD_WAKE_AFFINE \ - | 0*SD_SHARE_CPUPOWER \ - | 0*SD_SHARE_PKG_RESOURCES \ - | 0*SD_SERIALIZE \ - | 1*SD_PREFER_SIBLING \ - , \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .max_newidle_lb_cost = 0, \ - .next_decay_max_lb_cost = jiffies, \ -} -#endif - -#ifdef CONFIG_SCHED_BOOK -#ifndef SD_BOOK_INIT -#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!! 
-#endif -#endif /* CONFIG_SCHED_BOOK */ - #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DECLARE_PER_CPU(int, numa_node); @@ -295,4 +180,17 @@ static inline int cpu_to_mem(int cpu) #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif +#ifdef CONFIG_SCHED_SMT +static inline const struct cpumask *cpu_smt_mask(int cpu) +{ + return topology_thread_cpumask(cpu); +} +#endif + +static inline const struct cpumask *cpu_cpu_mask(int cpu) +{ + return cpumask_of_node(cpu_to_node(cpu)); +} + + #endif /* _LINUX_TOPOLOGY_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 13584f1cccfc..7d332b7899cc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5566,17 +5566,6 @@ static int __init isolated_cpu_setup(char *str) __setup("isolcpus=", isolated_cpu_setup); -static const struct cpumask *cpu_cpu_mask(int cpu) -{ - return cpumask_of_node(cpu_to_node(cpu)); -} - -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_group **__percpu sg; - struct sched_group_power **__percpu sgp; -}; - struct s_data { struct sched_domain ** __percpu sd; struct root_domain *rd; @@ -5589,21 +5578,6 @@ enum s_alloc { sa_none, }; -struct sched_domain_topology_level; - -typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); - -#define SDTL_OVERLAP 0x01 - -struct sched_domain_topology_level { - sched_domain_init_f init; - sched_domain_mask_f mask; - int flags; - int numa_level; - struct sd_data data; -}; - /* * Build an iteration mask that can exclude certain CPUs from the upwards * domain traversal. @@ -5832,34 +5806,6 @@ int __weak arch_sd_sibling_asym_packing(void) * Non-inlined to reduce accumulated stack pressure in build_sched_domains() */ -#ifdef CONFIG_SCHED_DEBUG -# define SD_INIT_NAME(sd, type) sd->name = #type -#else -# define SD_INIT_NAME(sd, type) do { } while (0) -#endif - -#define SD_INIT_FUNC(type) \ -static noinline struct sched_domain * \ -sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ -{ \ - struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ - *sd = SD_##type##_INIT; \ - SD_INIT_NAME(sd, type); \ - sd->private = &tl->data; \ - return sd; \ -} - -SD_INIT_FUNC(CPU) -#ifdef CONFIG_SCHED_SMT - SD_INIT_FUNC(SIBLING) -#endif -#ifdef CONFIG_SCHED_MC - SD_INIT_FUNC(MC) -#endif -#ifdef CONFIG_SCHED_BOOK - SD_INIT_FUNC(BOOK) -#endif - static int default_relax_domain_level = -1; int sched_domain_level_max; @@ -5947,99 +5893,156 @@ static void claim_allocations(int cpu, struct sched_domain *sd) *per_cpu_ptr(sdd->sgp, cpu) = NULL; } -#ifdef CONFIG_SCHED_SMT -static const struct cpumask *cpu_smt_mask(int cpu) -{ - return topology_thread_cpumask(cpu); -} -#endif - -/* - * Topology list, bottom-up. 
- */ -static struct sched_domain_topology_level default_topology[] = { -#ifdef CONFIG_SCHED_SMT - { sd_init_SIBLING, cpu_smt_mask, }, -#endif -#ifdef CONFIG_SCHED_MC - { sd_init_MC, cpu_coregroup_mask, }, -#endif -#ifdef CONFIG_SCHED_BOOK - { sd_init_BOOK, cpu_book_mask, }, -#endif - { sd_init_CPU, cpu_cpu_mask, }, - { NULL, }, -}; - -static struct sched_domain_topology_level *sched_domain_topology = default_topology; - -#define for_each_sd_topology(tl) \ - for (tl = sched_domain_topology; tl->init; tl++) - #ifdef CONFIG_NUMA - static int sched_domains_numa_levels; static int *sched_domains_numa_distance; static struct cpumask ***sched_domains_numa_masks; static int sched_domains_curr_level; +#endif -static inline int sd_local_flags(int level) -{ - if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) - return 0; - - return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; -} +/* + * SD_flags allowed in topology descriptions. + * + * SD_SHARE_CPUPOWER - describes SMT topologies + * SD_SHARE_PKG_RESOURCES - describes shared caches + * SD_NUMA - describes NUMA topologies + * + * Odd one out: + * SD_ASYM_PACKING - describes SMT quirks + */ +#define TOPOLOGY_SD_FLAGS \ + (SD_SHARE_CPUPOWER | \ + SD_SHARE_PKG_RESOURCES | \ + SD_NUMA | \ + SD_ASYM_PACKING) static struct sched_domain * -sd_numa_init(struct sched_domain_topology_level *tl, int cpu) +sd_init(struct sched_domain_topology_level *tl, int cpu) { struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); - int level = tl->numa_level; - int sd_weight = cpumask_weight( - sched_domains_numa_masks[level][cpu_to_node(cpu)]); + int sd_weight, sd_flags = 0; + +#ifdef CONFIG_NUMA + /* + * Ugly hack to pass state to sd_numa_mask()... + */ + sched_domains_curr_level = tl->numa_level; +#endif + + sd_weight = cpumask_weight(tl->mask(cpu)); + + if (tl->sd_flags) + sd_flags = (*tl->sd_flags)(); + if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, + "wrong sd_flags in topology description\n")) + sd_flags &= ~TOPOLOGY_SD_FLAGS; *sd = (struct sched_domain){ .min_interval = sd_weight, .max_interval = 2*sd_weight, .busy_factor = 32, .imbalance_pct = 125, - .cache_nice_tries = 2, - .busy_idx = 3, - .idle_idx = 2, + + .cache_nice_tries = 0, + .busy_idx = 0, + .idle_idx = 0, .newidle_idx = 0, .wake_idx = 0, .forkexec_idx = 0, .flags = 1*SD_LOAD_BALANCE | 1*SD_BALANCE_NEWIDLE - | 0*SD_BALANCE_EXEC - | 0*SD_BALANCE_FORK + | 1*SD_BALANCE_EXEC + | 1*SD_BALANCE_FORK | 0*SD_BALANCE_WAKE - | 0*SD_WAKE_AFFINE + | 1*SD_WAKE_AFFINE | 0*SD_SHARE_CPUPOWER | 0*SD_SHARE_PKG_RESOURCES - | 1*SD_SERIALIZE + | 0*SD_SERIALIZE | 0*SD_PREFER_SIBLING - | 1*SD_NUMA - | sd_local_flags(level) + | 0*SD_NUMA + | sd_flags , + .last_balance = jiffies, .balance_interval = sd_weight, + .smt_gain = 0, .max_newidle_lb_cost = 0, .next_decay_max_lb_cost = jiffies, +#ifdef CONFIG_SCHED_DEBUG + .name = tl->name, +#endif }; - SD_INIT_NAME(sd, NUMA); - sd->private = &tl->data; /* - * Ugly hack to pass state to sd_numa_mask()... + * Convert topological properties into behaviour. 
*/ - sched_domains_curr_level = tl->numa_level; + + if (sd->flags & SD_SHARE_CPUPOWER) { + sd->imbalance_pct = 110; + sd->smt_gain = 1178; /* ~15% */ + sd->flags |= arch_sd_sibling_asym_packing(); + + } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { + sd->imbalance_pct = 117; + sd->cache_nice_tries = 1; + sd->busy_idx = 2; + +#ifdef CONFIG_NUMA + } else if (sd->flags & SD_NUMA) { + sd->cache_nice_tries = 2; + sd->busy_idx = 3; + sd->idle_idx = 2; + + sd->flags |= SD_SERIALIZE; + if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { + sd->flags &= ~(SD_BALANCE_EXEC | + SD_BALANCE_FORK | + SD_WAKE_AFFINE); + } + +#endif + } else { + sd->flags |= SD_PREFER_SIBLING; + sd->cache_nice_tries = 1; + sd->busy_idx = 2; + sd->idle_idx = 1; + } + + sd->private = &tl->data; return sd; } +/* + * Topology list, bottom-up. + */ +static struct sched_domain_topology_level default_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, +#endif +#ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, +#endif +#ifdef CONFIG_SCHED_BOOK + { cpu_book_mask, SD_INIT_NAME(BOOK) }, +#endif + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + +struct sched_domain_topology_level *sched_domain_topology = default_topology; + +#define for_each_sd_topology(tl) \ + for (tl = sched_domain_topology; tl->mask; tl++) + +void set_sched_topology(struct sched_domain_topology_level *tl) +{ + sched_domain_topology = tl; +} + +#ifdef CONFIG_NUMA + static const struct cpumask *sd_numa_mask(int cpu) { return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; @@ -6183,7 +6186,10 @@ static void sched_init_numa(void) } } - tl = kzalloc((ARRAY_SIZE(default_topology) + level) * + /* Compute default topology size */ + for (i = 0; sched_domain_topology[i].mask; i++); + + tl = kzalloc((i + level) * sizeof(struct sched_domain_topology_level), GFP_KERNEL); if (!tl) return; @@ -6191,18 +6197,19 @@ static void sched_init_numa(void) /* * Copy the default topology bits.. */ - for (i = 0; default_topology[i].init; i++) - tl[i] = default_topology[i]; + for (i = 0; sched_domain_topology[i].mask; i++) + tl[i] = sched_domain_topology[i]; /* * .. and append 'j' levels of NUMA goodness. */ for (j = 0; j < level; i++, j++) { tl[i] = (struct sched_domain_topology_level){ - .init = sd_numa_init, .mask = sd_numa_mask, + .sd_flags = cpu_numa_flags, .flags = SDTL_OVERLAP, .numa_level = j, + SD_INIT_NAME(NUMA) }; } @@ -6360,7 +6367,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *child, int cpu) { - struct sched_domain *sd = tl->init(tl, cpu); + struct sched_domain *sd = sd_init(tl, cpu); if (!sd) return child; -- cgit v1.2.3 From 607b45e9a216e89a63351556e488eea06be0ff48 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 11 Apr 2014 11:44:39 +0200 Subject: sched, powerpc: Create a dedicated topology table Create a dedicated topology table for handling asymetric feature of powerpc. Signed-off-by: Vincent Guittot Reviewed-by: Preeti U Murthy Signed-off-by: Peter Zijlstra Cc: Andy Fleming Cc: Anton Blanchard Cc: Benjamin Herrenschmidt Cc: Grant Likely Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Preeti U. Murthy Cc: Rob Herring Cc: Srivatsa S. 
Bhat Cc: Toshi Kani Cc: Vasant Hegde Cc: tony.luck@intel.com Cc: fenghua.yu@intel.com Cc: schwidefsky@de.ibm.com Cc: cmetcalf@tilera.com Cc: dietmar.eggemann@arm.com Cc: devicetree@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/1397209481-28542-4-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/smp.c | 31 +++++++++++++++++++++++-------- include/linux/sched.h | 2 -- kernel/sched/core.c | 6 ------ 3 files changed, 23 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e2a4232c5871..10ffffef0414 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -766,6 +766,28 @@ int setup_profiling_timer(unsigned int multiplier) return 0; } +#ifdef CONFIG_SCHED_SMT +/* cpumask of CPUs with asymetric SMT dependancy */ +static const int powerpc_smt_flags(void) +{ + int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; + + if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { + printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); + flags |= SD_ASYM_PACKING; + } + return flags; +} +#endif + +static struct sched_domain_topology_level powerpc_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, +#endif + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + void __init smp_cpus_done(unsigned int max_cpus) { cpumask_var_t old_mask; @@ -790,15 +812,8 @@ void __init smp_cpus_done(unsigned int max_cpus) dump_numa_cpu_topology(); -} + set_sched_topology(powerpc_topology); -int arch_sd_sibling_asym_packing(void) -{ - if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { - printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); - return SD_ASYM_PACKING; - } - return 0; } #ifdef CONFIG_HOTPLUG_CPU diff --git a/include/linux/sched.h b/include/linux/sched.h index 656b035c30e5..439a153b8403 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -877,8 +877,6 @@ enum cpu_idle_type { #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ #define SD_NUMA 0x4000 /* cross-node balancing */ -extern int __weak arch_sd_sibiling_asym_packing(void); - #ifdef CONFIG_SCHED_SMT static inline const int cpu_smt_flags(void) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e59e5aec745a..7e348e238bf1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5796,11 +5796,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight); } -int __weak arch_sd_sibling_asym_packing(void) -{ - return 0*SD_ASYM_PACKING; -} - /* * Initializers for schedule domains * Non-inlined to reduce accumulated stack pressure in build_sched_domains() @@ -5981,7 +5976,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) if (sd->flags & SD_SHARE_CPUPOWER) { sd->imbalance_pct = 110; sd->smt_gain = 1178; /* ~15% */ - sd->flags |= arch_sd_sibling_asym_packing(); } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { sd->imbalance_pct = 117; -- cgit v1.2.3 From d77b3ed5c9f8ebedf154b52b5e943c461f3d37e6 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 11 Apr 2014 11:44:40 +0200 Subject: sched: Add a new SD_SHARE_POWERDOMAIN for sched_domain A new flag SD_SHARE_POWERDOMAIN is created to reflect whether groups of CPUs in a sched_domain level can or not reach different power state. As an example, the flag should be cleared at CPU level if groups of cores can be power gated independently. 
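In terms of the topology tables introduced earlier in this series, that could be expressed along these lines (sketch only; my_smt_flags and my_topology are made-up names). The SMT level advertises the shared power domain, while the DIE level leaves the flag cleared because its cores can be power gated independently:

    static const int my_smt_flags(void)
    {
            /* threads of a core share capacity, caches and the power domain */
            return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES |
                   SD_SHARE_POWERDOMAIN;
    }

    static struct sched_domain_topology_level my_topology[] = {
    #ifdef CONFIG_SCHED_SMT
            { cpu_smt_mask, my_smt_flags, SD_INIT_NAME(SMT) },
    #endif
            { cpu_cpu_mask, SD_INIT_NAME(DIE) },   /* no SD_SHARE_POWERDOMAIN */
            { NULL, },
    };
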
This information can be used in the load balance decision or to add load balancing level between group of CPUs that can power gate independantly. This flag is part of the topology flags that can be set by arch. Reviewed-by: Dietmar Eggemann Tested-by: Dietmar Eggemann Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra Cc: Linus Torvalds Cc: tony.luck@intel.com Cc: fenghua.yu@intel.com Cc: schwidefsky@de.ibm.com Cc: cmetcalf@tilera.com Cc: benh@kernel.crashing.org Cc: preeti@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1397209481-28542-5-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 + kernel/sched/core.c | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 439a153b8403..accb66bfd722 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -870,6 +870,7 @@ enum cpu_idle_type { #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7e348e238bf1..1c9c3b7b26af 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5261,7 +5261,8 @@ static int sd_degenerate(struct sched_domain *sd) SD_BALANCE_FORK | SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | - SD_SHARE_PKG_RESOURCES)) { + SD_SHARE_PKG_RESOURCES | + SD_SHARE_POWERDOMAIN)) { if (sd->groups != sd->groups->next) return 0; } @@ -5292,7 +5293,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES | - SD_PREFER_SIBLING); + SD_PREFER_SIBLING | + SD_SHARE_POWERDOMAIN); if (nr_node_ids == 1) pflags &= ~SD_SERIALIZE; } @@ -5901,6 +5903,7 @@ static int sched_domains_curr_level; * SD_SHARE_CPUPOWER - describes SMT topologies * SD_SHARE_PKG_RESOURCES - describes shared caches * SD_NUMA - describes NUMA topologies + * SD_SHARE_POWERDOMAIN - describes shared power domain * * Odd one out: * SD_ASYM_PACKING - describes SMT quirks @@ -5909,7 +5912,8 @@ static int sched_domains_curr_level; (SD_SHARE_CPUPOWER | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ - SD_ASYM_PACKING) + SD_ASYM_PACKING | \ + SD_SHARE_POWERDOMAIN) static struct sched_domain * sd_init(struct sched_domain_topology_level *tl, int cpu) -- cgit v1.2.3 From 591c1ee465ce5372385dbc41e7d3e36cbb477bd8 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 24 Apr 2014 11:30:04 -0400 Subject: of: configure the platform device dma parameters Retrieve DMA configuration from DT and setup platform device's DMA parameters. The DMA configuration in DT has to be specified using "dma-ranges" and "dma-coherent" properties if supported. We setup dma_pfn_offset using "dma-ranges" and dma_coherent_ops using "dma-coherent" device tree properties. The set_arch_dma_coherent_ops macro has to be defined by arch if it supports coherent dma_ops. Otherwise, set_arch_dma_coherent_ops() is declared as nop. 
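For illustration only, an architecture that does support coherent dma_ops could satisfy this hook roughly as follows; this is a minimal sketch, not part of the patch, and both set_dma_ops() and the arch_coherent_dma_ops name are assumptions about what the architecture provides:

/*
 * Hypothetical arch override (e.g. in asm/dma-mapping.h): install the
 * architecture's coherent dma_map_ops on the device and return 0, matching
 * the generic nop fallback's signature.  set_dma_ops() and
 * arch_coherent_dma_ops are assumed names, not taken from this patch.
 */
#define set_arch_dma_coherent_ops(dev)				\
	({							\
		set_dma_ops((dev), &arch_coherent_dma_ops);	\
		0;						\
	})
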
Cc: Greg Kroah-Hartman Cc: Russell King Cc: Arnd Bergmann Cc: Olof Johansson Cc: Grant Likely Cc: Catalin Marinas Cc: Linus Walleij Reviewed-by: Rob Herring Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar --- drivers/of/platform.c | 65 ++++++++++++++++++++++++++++++++++++++++----- include/linux/dma-mapping.h | 7 +++++ 2 files changed, 66 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 404d1daebefa..91fa9838b56f 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -186,6 +186,64 @@ struct platform_device *of_device_alloc(struct device_node *np, } EXPORT_SYMBOL(of_device_alloc); +/** + * of_dma_configure - Setup DMA configuration + * @dev: Device to apply DMA configuration + * + * Try to get devices's DMA configuration from DT and update it + * accordingly. + * + * In case if platform code need to use own special DMA configuration,it + * can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event + * to fix up DMA configuration. + */ +static void of_dma_configure(struct platform_device *pdev) +{ + u64 dma_addr, paddr, size; + int ret; + struct device *dev = &pdev->dev; + +#if defined(CONFIG_MICROBLAZE) + pdev->archdata.dma_mask = 0xffffffffUL; +#endif + + /* + * Set default dma-mask to 32 bit. Drivers are expected to setup + * the correct supported dma_mask. + */ + dev->coherent_dma_mask = DMA_BIT_MASK(32); + + /* + * Set it to coherent_dma_mask by default if the architecture + * code has not set it. + */ + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + + /* + * if dma-coherent property exist, call arch hook to setup + * dma coherent operations. + */ + if (of_dma_is_coherent(dev->of_node)) { + set_arch_dma_coherent_ops(dev); + dev_dbg(dev, "device is dma coherent\n"); + } + + /* + * if dma-ranges property doesn't exist - just return else + * setup the dma offset + */ + ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); + if (ret < 0) { + dev_dbg(dev, "no dma range information to setup\n"); + return; + } + + /* DMA ranges found. Calculate and set dma_pfn_offset */ + dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); + dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); +} + /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for @@ -211,12 +269,7 @@ static struct platform_device *of_platform_device_create_pdata( if (!dev) return NULL; -#if defined(CONFIG_MICROBLAZE) - dev->archdata.dma_mask = 0xffffffffUL; -#endif - dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - if (!dev->dev.dma_mask) - dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + of_dma_configure(dev); dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index fd4aee29ad10..c7d9b1b14ce7 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -123,6 +123,13 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) extern u64 dma_get_required_mask(struct device *dev); +#ifndef set_arch_dma_coherent_ops +static inline int set_arch_dma_coherent_ops(struct device *dev) +{ + return 0; +} +#endif + static inline unsigned int dma_get_max_seg_size(struct device *dev) { return dev->dma_parms ? 
dev->dma_parms->max_segment_size : 65536; -- cgit v1.2.3 From 506e931f92defdc60c1dc4aa2ff4a19a5dcd8618 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 7 May 2014 10:26:44 -0600 Subject: blk-mq: add basic round-robin of what CPU to queue workqueue work on Right now we just pick the first CPU in the mask, but that can easily overload that one. Add some basic batching and round-robin all the entries in the mask instead. Signed-off-by: Jens Axboe --- block/blk-mq.c | 45 +++++++++++++++++++++++++++++++-------------- include/linux/blk-mq.h | 4 ++++ 2 files changed, 35 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 0d379830a278..2410e0cb7aef 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -670,6 +670,30 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) } } +/* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement. + * For now we just round-robin here, switching for every + * BLK_MQ_CPU_WORK_BATCH queued items. + */ +static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) +{ + int cpu = hctx->next_cpu; + + if (--hctx->next_cpu_batch <= 0) { + int next_cpu; + + next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(hctx->cpumask); + + hctx->next_cpu = next_cpu; + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } + + return cpu; +} + void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) @@ -682,13 +706,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) else { unsigned int cpu; - /* - * It'd be great if the workqueue API had a way to pass - * in a mask and had some smarts for more clever placement - * than the first CPU. Or we could round-robin here. For now, - * just queue on the first CPU. - */ - cpu = cpumask_first(hctx->cpumask); + cpu = blk_mq_hctx_next_cpu(hctx); kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); } } @@ -795,13 +813,7 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) else { unsigned int cpu; - /* - * It'd be great if the workqueue API had a way to pass - * in a mask and had some smarts for more clever placement - * than the first CPU. Or we could round-robin here. For now, - * just queue on the first CPU. 
- */ - cpu = cpumask_first(hctx->cpumask); + cpu = blk_mq_hctx_next_cpu(hctx); kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); } } @@ -1378,6 +1390,11 @@ static void blk_mq_map_swqueue(struct request_queue *q) ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + + queue_for_each_hw_ctx(q, hctx, i) { + hctx->next_cpu = cpumask_first(hctx->cpumask); + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } } struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3b561d651a02..5bd677e2dcb7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -21,6 +21,8 @@ struct blk_mq_hw_ctx { struct delayed_work run_work; struct delayed_work delay_work; cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; unsigned long flags; /* BLK_MQ_F_* flags */ @@ -126,6 +128,8 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_MAX_DEPTH = 2048, + + BLK_MQ_CPU_WORK_BATCH = 8, }; struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); -- cgit v1.2.3 From 80eded6ce8bb8bade60955660c6957d6166c44c1 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Wed, 7 May 2014 18:02:15 +0200 Subject: clk: at91: add slow clks driver AT91 slow clk is a clk multiplexer. In some SoCs (sam9x5, sama5, sam9g45 families) this multiplexer can choose among 2 sources: an internal RC oscillator circuit and an oscillator using an external crystal. In other Socs (sam9260 family) the multiplexer source is hardcoded with the OSCSEL signal. Signed-off-by: Boris BREZILLON Acked-by: Mike Turquette Signed-off-by: Nicolas Ferre --- drivers/clk/at91/Makefile | 4 +- drivers/clk/at91/clk-slow.c | 467 +++++++++++++++++++++++++++++++++++++++++++ drivers/clk/at91/pmc.c | 5 + drivers/clk/at91/pmc.h | 3 + drivers/clk/at91/sckc.c | 57 ++++++ drivers/clk/at91/sckc.h | 22 ++ include/linux/clk/at91_pmc.h | 1 + 7 files changed, 557 insertions(+), 2 deletions(-) create mode 100644 drivers/clk/at91/clk-slow.c create mode 100644 drivers/clk/at91/sckc.c create mode 100644 drivers/clk/at91/sckc.h (limited to 'include/linux') diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 46c1d3d0d66b..4998aee59267 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -2,8 +2,8 @@ # Makefile for at91 specific clk # -obj-y += pmc.o -obj-y += clk-main.o clk-pll.o clk-plldiv.o clk-master.o +obj-y += pmc.o sckc.o +obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o obj-y += clk-system.o clk-peripheral.o clk-programmable.o obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c new file mode 100644 index 000000000000..0300c46ee247 --- /dev/null +++ b/drivers/clk/at91/clk-slow.c @@ -0,0 +1,467 @@ +/* + * drivers/clk/at91/clk-slow.c + * + * Copyright (C) 2013 Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pmc.h" +#include "sckc.h" + +#define SLOW_CLOCK_FREQ 32768 +#define SLOWCK_SW_CYCLES 5 +#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \ + SLOW_CLOCK_FREQ) + +#define AT91_SCKC_CR 0x00 +#define AT91_SCKC_RCEN (1 << 0) +#define AT91_SCKC_OSC32EN (1 << 1) +#define AT91_SCKC_OSC32BYP (1 << 2) +#define AT91_SCKC_OSCSEL (1 << 3) + +struct clk_slow_osc { + struct clk_hw hw; + void __iomem *sckcr; + unsigned long startup_usec; +}; + +#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw) + +struct clk_slow_rc_osc { + struct clk_hw hw; + void __iomem *sckcr; + unsigned long frequency; + unsigned long accuracy; + unsigned long startup_usec; +}; + +#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw) + +struct clk_sam9260_slow { + struct clk_hw hw; + struct at91_pmc *pmc; +}; + +#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw) + +struct clk_sam9x5_slow { + struct clk_hw hw; + void __iomem *sckcr; + u8 parent; +}; + +#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw) + + +static int clk_slow_osc_prepare(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return 0; + + writel(tmp | AT91_SCKC_OSC32EN, sckcr); + + usleep_range(osc->startup_usec, osc->startup_usec + 1); + + return 0; +} + +static void clk_slow_osc_unprepare(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return; + + writel(tmp & ~AT91_SCKC_OSC32EN, sckcr); +} + +static int clk_slow_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return 1; + + return !!(tmp & AT91_SCKC_OSC32EN); +} + +static const struct clk_ops slow_osc_ops = { + .prepare = clk_slow_osc_prepare, + .unprepare = clk_slow_osc_unprepare, + .is_prepared = clk_slow_osc_is_prepared, +}; + +static struct clk * __init +at91_clk_register_slow_osc(void __iomem *sckcr, + const char *name, + const char *parent_name, + unsigned long startup, + bool bypass) +{ + struct clk_slow_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name || !parent_name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &slow_osc_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + init.flags = CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->sckcr = sckcr; + osc->startup_usec = startup; + + if (bypass) + writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP, + sckcr); + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) + kfree(osc); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + const char *parent_name; + const char *name = np->name; + u32 startup; + bool bypass; + + parent_name = of_clk_get_parent_name(np, 0); + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "atmel,startup-time-usec", &startup); + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + clk = at91_clk_register_slow_osc(sckcr, 
name, parent_name, startup, + bypass); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return osc->frequency; +} + +static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw, + unsigned long parent_acc) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return osc->accuracy; +} + +static int clk_slow_rc_osc_prepare(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + void __iomem *sckcr = osc->sckcr; + + writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr); + + usleep_range(osc->startup_usec, osc->startup_usec + 1); + + return 0; +} + +static void clk_slow_rc_osc_unprepare(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + void __iomem *sckcr = osc->sckcr; + + writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr); +} + +static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return !!(readl(osc->sckcr) & AT91_SCKC_RCEN); +} + +static const struct clk_ops slow_rc_osc_ops = { + .prepare = clk_slow_rc_osc_prepare, + .unprepare = clk_slow_rc_osc_unprepare, + .is_prepared = clk_slow_rc_osc_is_prepared, + .recalc_rate = clk_slow_rc_osc_recalc_rate, + .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy, +}; + +static struct clk * __init +at91_clk_register_slow_rc_osc(void __iomem *sckcr, + const char *name, + unsigned long frequency, + unsigned long accuracy, + unsigned long startup) +{ + struct clk_slow_rc_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &slow_rc_osc_ops; + init.parent_names = NULL; + init.num_parents = 0; + init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->sckcr = sckcr; + osc->frequency = frequency; + osc->accuracy = accuracy; + osc->startup_usec = startup; + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) + kfree(osc); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + u32 frequency = 0; + u32 accuracy = 0; + u32 startup = 0; + const char *name = np->name; + + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "clock-frequency", &frequency); + of_property_read_u32(np, "clock-accuracy", &accuracy); + of_property_read_u32(np, "atmel,startup-time-usec", &startup); + + clk = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy, + startup); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); + void __iomem *sckcr = slowck->sckcr; + u32 tmp; + + if (index > 1) + return -EINVAL; + + tmp = readl(sckcr); + + if ((!index && !(tmp & AT91_SCKC_OSCSEL)) || + (index && (tmp & AT91_SCKC_OSCSEL))) + return 0; + + if (index) + tmp |= AT91_SCKC_OSCSEL; + else + tmp &= ~AT91_SCKC_OSCSEL; + + writel(tmp, sckcr); + + usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1); + + return 0; +} + +static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw) +{ + struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); + + return !!(readl(slowck->sckcr) & 
AT91_SCKC_OSCSEL); +} + +static const struct clk_ops sam9x5_slow_ops = { + .set_parent = clk_sam9x5_slow_set_parent, + .get_parent = clk_sam9x5_slow_get_parent, +}; + +static struct clk * __init +at91_clk_register_sam9x5_slow(void __iomem *sckcr, + const char *name, + const char **parent_names, + int num_parents) +{ + struct clk_sam9x5_slow *slowck; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name || !parent_names || !num_parents) + return ERR_PTR(-EINVAL); + + slowck = kzalloc(sizeof(*slowck), GFP_KERNEL); + if (!slowck) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sam9x5_slow_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = 0; + + slowck->hw.init = &init; + slowck->sckcr = sckcr; + slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL); + + clk = clk_register(NULL, &slowck->hw); + if (IS_ERR(clk)) + kfree(slowck); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + const char *parent_names[2]; + int num_parents; + const char *name = np->name; + int i; + + num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (num_parents <= 0 || num_parents > 2) + return; + + for (i = 0; i < num_parents; ++i) { + parent_names[i] = of_clk_get_parent_name(np, i); + if (!parent_names[i]) + return; + } + + of_property_read_string(np, "clock-output-names", &name); + + clk = at91_clk_register_sam9x5_slow(sckcr, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw) +{ + struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw); + + return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL); +} + +static const struct clk_ops sam9260_slow_ops = { + .get_parent = clk_sam9260_slow_get_parent, +}; + +static struct clk * __init +at91_clk_register_sam9260_slow(struct at91_pmc *pmc, + const char *name, + const char **parent_names, + int num_parents) +{ + struct clk_sam9260_slow *slowck; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!pmc || !name) + return ERR_PTR(-EINVAL); + + if (!parent_names || !num_parents) + return ERR_PTR(-EINVAL); + + slowck = kzalloc(sizeof(*slowck), GFP_KERNEL); + if (!slowck) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sam9260_slow_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = 0; + + slowck->hw.init = &init; + slowck->pmc = pmc; + + clk = clk_register(NULL, &slowck->hw); + if (IS_ERR(clk)) + kfree(slowck); + + return clk; +} + +void __init of_at91sam9260_clk_slow_setup(struct device_node *np, + struct at91_pmc *pmc) +{ + struct clk *clk; + const char *parent_names[2]; + int num_parents; + const char *name = np->name; + int i; + + num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (num_parents <= 0 || num_parents > 1) + return; + + for (i = 0; i < num_parents; ++i) { + parent_names[i] = of_clk_get_parent_name(np, i); + if (!parent_names[i]) + return; + } + + of_property_read_string(np, "clock-output-names", &name); + + clk = at91_clk_register_sam9260_slow(pmc, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index dc5fdde98e1a..524196bb35a5 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -229,6 +229,11 @@ 
out_free_pmc: } static const struct of_device_id pmc_clk_ids[] __initconst = { + /* Slow oscillator */ + { + .compatible = "atmel,at91sam9260-clk-slow", + .data = of_at91sam9260_clk_slow_setup, + }, /* Main clock */ { .compatible = "atmel,at91rm9200-clk-main-osc", diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 42cc7cc5e1d3..6c7625976113 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -58,6 +58,9 @@ static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value) int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); +extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np, + struct at91_pmc *pmc); + extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, struct at91_pmc *pmc); extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c new file mode 100644 index 000000000000..1184d76a7ab7 --- /dev/null +++ b/drivers/clk/at91/sckc.c @@ -0,0 +1,57 @@ +/* + * drivers/clk/at91/sckc.c + * + * Copyright (C) 2013 Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include + +#include "sckc.h" + +static const struct of_device_id sckc_clk_ids[] __initconst = { + /* Slow clock */ + { + .compatible = "atmel,at91sam9x5-clk-slow-osc", + .data = of_at91sam9x5_clk_slow_osc_setup, + }, + { + .compatible = "atmel,at91sam9x5-clk-slow-rc-osc", + .data = of_at91sam9x5_clk_slow_rc_osc_setup, + }, + { + .compatible = "atmel,at91sam9x5-clk-slow", + .data = of_at91sam9x5_clk_slow_setup, + }, + { /*sentinel*/ } +}; + +static void __init of_at91sam9x5_sckc_setup(struct device_node *np) +{ + struct device_node *childnp; + void (*clk_setup)(struct device_node *, void __iomem *); + const struct of_device_id *clk_id; + void __iomem *regbase = of_iomap(np, 0); + + if (!regbase) + return; + + for_each_child_of_node(np, childnp) { + clk_id = of_match_node(sckc_clk_ids, childnp); + if (!clk_id) + continue; + clk_setup = clk_id->data; + clk_setup(childnp, regbase); + } +} +CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc", + of_at91sam9x5_sckc_setup); diff --git a/drivers/clk/at91/sckc.h b/drivers/clk/at91/sckc.h new file mode 100644 index 000000000000..836fcf59820f --- /dev/null +++ b/drivers/clk/at91/sckc.h @@ -0,0 +1,22 @@ +/* + * drivers/clk/at91/sckc.h + * + * Copyright (C) 2013 Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __AT91_SCKC_H_ +#define __AT91_SCKC_H_ + +extern void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, + void __iomem *sckcr); +extern void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, + void __iomem *sckcr); +extern void __init of_at91sam9x5_clk_slow_setup(struct device_node *np, + void __iomem *sckcr); + +#endif /* __AT91_SCKC_H_ */ diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index a6911ebbd02a..de4268d4987a 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -155,6 +155,7 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */ #define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */ #define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [some SAM9] */ +#define AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some SAM9] */ #define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */ #define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */ -- cgit v1.2.3 From 2de0c019f34ffbe49744c453628afb270aa9adb6 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Tue, 15 Apr 2014 12:27:58 +0200 Subject: iio: adc: at91: cleanup platform_data num_channels and registers are not used anymore since they are defined inside the driver and assigned by matching the id_table. Also, struct at91_adc_reg_desc is now only used inside the driver. Signed-off-by: Alexandre Belloni Acked-by: Jonathan Cameron Signed-off-by: Nicolas Ferre --- drivers/iio/adc/at91_adc.c | 19 +++++++++++++++++++ include/linux/platform_data/at91_adc.h | 23 ----------------------- 2 files changed, 19 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 89777ed9abd8..1beae65aef2c 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -46,6 +46,25 @@ #define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */ #define TOUCH_PEN_DETECT_DEBOUNCE_US 200 +/** + * struct at91_adc_reg_desc - Various informations relative to registers + * @channel_base: Base offset for the channel data registers + * @drdy_mask: Mask of the DRDY field in the relevant registers + (Interruptions registers mostly) + * @status_register: Offset of the Interrupt Status Register + * @trigger_register: Offset of the Trigger setup register + * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register + * @mr_startup_mask: Mask of the STARTUP field in the adc MR register + */ +struct at91_adc_reg_desc { + u8 channel_base; + u32 drdy_mask; + u8 status_register; + u8 trigger_register; + u32 mr_prescal_mask; + u32 mr_startup_mask; +}; + struct at91_adc_caps { bool has_ts; /* Support touch screen */ bool has_tsmr; /* only at91sam9x5, sama5d3 have TSMR reg */ diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h index b3ca1e94e0c8..fcf73879dbfe 100644 --- a/include/linux/platform_data/at91_adc.h +++ b/include/linux/platform_data/at91_adc.h @@ -7,25 +7,6 @@ #ifndef _AT91_ADC_H_ #define _AT91_ADC_H_ -/** - * struct at91_adc_reg_desc - Various informations relative to registers - * @channel_base: Base offset for the channel data registers - * @drdy_mask: Mask of the DRDY field in the relevant registers - (Interruptions registers mostly) - * @status_register: Offset of the Interrupt Status Register - * @trigger_register: Offset of the Trigger setup register - * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register - * @mr_startup_mask: Mask of 
the STARTUP field in the adc MR register - */ -struct at91_adc_reg_desc { - u8 channel_base; - u32 drdy_mask; - u8 status_register; - u8 trigger_register; - u32 mr_prescal_mask; - u32 mr_startup_mask; -}; - /** * struct at91_adc_trigger - description of triggers * @name: name of the trigger advertised to the user @@ -42,8 +23,6 @@ struct at91_adc_trigger { /** * struct at91_adc_data - platform data for ADC driver * @channels_used: channels in use on the board as a bitmask - * @num_channels: global number of channels available on the board - * @registers: Registers definition on the board * @startup_time: startup time of the ADC in microseconds * @trigger_list: Triggers available in the ADC * @trigger_number: Number of triggers available in the ADC @@ -52,8 +31,6 @@ struct at91_adc_trigger { */ struct at91_adc_data { unsigned long channels_used; - u8 num_channels; - struct at91_adc_reg_desc *registers; u8 startup_time; struct at91_adc_trigger *trigger_list; u8 trigger_number; -- cgit v1.2.3 From 84882b060301c35ab7e2c1ef355b0bd06b764195 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Tue, 15 Apr 2014 12:27:59 +0200 Subject: iio: adc: at91_adc: Add support for touchscreens without TSMR Old ADCs, as present on the sam9rl and the sam9g45 don't have a TSMR register and the touchscreen support should be handled differently. Signed-off-by: Alexandre Belloni Acked-by: Jonathan Cameron Signed-off-by: Nicolas Ferre --- arch/arm/mach-at91/include/mach/at91_adc.h | 13 ++ drivers/iio/adc/at91_adc.c | 200 ++++++++++++++++++++++------- include/linux/platform_data/at91_adc.h | 8 ++ 3 files changed, 174 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-at91/include/mach/at91_adc.h b/arch/arm/mach-at91/include/mach/at91_adc.h index c287307b9a3b..7d80396346b2 100644 --- a/arch/arm/mach-at91/include/mach/at91_adc.h +++ b/arch/arm/mach-at91/include/mach/at91_adc.h @@ -20,6 +20,9 @@ #define AT91_ADC_START (1 << 1) /* Start Conversion */ #define AT91_ADC_MR 0x04 /* Mode Register */ +#define AT91_ADC_TSAMOD (3 << 0) /* ADC mode */ +#define AT91_ADC_TSAMOD_ADC_ONLY_MODE (0 << 0) /* ADC Mode */ +#define AT91_ADC_TSAMOD_TS_ONLY_MODE (1 << 0) /* Touch Screen Only Mode */ #define AT91_ADC_TRGEN (1 << 0) /* Trigger Enable */ #define AT91_ADC_TRGSEL (7 << 1) /* Trigger Selection */ #define AT91_ADC_TRGSEL_TC0 (0 << 1) @@ -28,6 +31,7 @@ #define AT91_ADC_TRGSEL_EXTERNAL (6 << 1) #define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */ #define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */ +#define AT91_ADC_PENDET (1 << 6) /* Pen contact detection enable */ #define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescalar Rate Selection */ #define AT91_ADC_PRESCAL_9G45 (0xff << 8) #define AT91_ADC_PRESCAL_(x) ((x) << 8) @@ -37,6 +41,12 @@ #define AT91_ADC_STARTUP_(x) ((x) << 16) #define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */ #define AT91_ADC_SHTIM_(x) ((x) << 24) +#define AT91_ADC_PENDBC (0x0f << 28) /* Pen Debounce time */ +#define AT91_ADC_PENDBC_(x) ((x) << 28) + +#define AT91_ADC_TSR 0x0C +#define AT91_ADC_TSR_SHTIM (0xf << 24) /* Sample & Hold Time */ +#define AT91_ADC_TSR_SHTIM_(x) ((x) << 24) #define AT91_ADC_CHER 0x10 /* Channel Enable Register */ #define AT91_ADC_CHDR 0x14 /* Channel Disable Register */ @@ -60,6 +70,8 @@ #define AT91_ADC_IER 0x24 /* Interrupt Enable Register */ #define AT91_ADC_IDR 0x28 /* Interrupt Disable Register */ #define AT91_ADC_IMR 0x2C /* Interrupt Mask Register */ +#define AT91RL_ADC_IER_PEN (1 << 20) +#define AT91RL_ADC_IER_NOPEN (1 << 21) #define 
AT91_ADC_IER_PEN (1 << 29) #define AT91_ADC_IER_NOPEN (1 << 30) #define AT91_ADC_IER_XRDY (1 << 20) @@ -102,6 +114,7 @@ #define AT91_ADC_TRGR_TRGPER (0xffff << 16) #define AT91_ADC_TRGR_TRGPER_(x) ((x) << 16) #define AT91_ADC_TRGR_TRGMOD (0x7 << 0) +#define AT91_ADC_TRGR_NONE (0 << 0) #define AT91_ADC_TRGR_MOD_PERIOD_TRIG (5 << 0) #endif diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 1beae65aef2c..c0e4206e34e5 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -46,6 +46,10 @@ #define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */ #define TOUCH_PEN_DETECT_DEBOUNCE_US 200 +#define MAX_RLPOS_BITS 10 +#define TOUCH_SAMPLE_PERIOD_US_RL 10000 /* 10ms, the SoC can't keep up with 2ms */ +#define TOUCH_SHTIM 0xa + /** * struct at91_adc_reg_desc - Various informations relative to registers * @channel_base: Base offset for the channel data registers @@ -83,12 +87,6 @@ struct at91_adc_caps { struct at91_adc_reg_desc registers; }; -enum atmel_adc_ts_type { - ATMEL_ADC_TOUCHSCREEN_NONE = 0, - ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, - ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, -}; - struct at91_adc_state { struct clk *adc_clk; u16 *buffer; @@ -133,6 +131,11 @@ struct at91_adc_state { u16 ts_sample_period_val; u32 ts_pressure_threshold; + u16 ts_pendbc; + + bool ts_bufferedmeasure; + u32 ts_prev_absx; + u32 ts_prev_absy; }; static irqreturn_t at91_adc_trigger_handler(int irq, void *p) @@ -239,7 +242,72 @@ static int at91_ts_sample(struct at91_adc_state *st) return 0; } -static irqreturn_t at91_adc_interrupt(int irq, void *private) +static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) +{ + struct iio_dev *idev = private; + struct at91_adc_state *st = iio_priv(idev); + u32 status = at91_adc_readl(st, st->registers->status_register); + unsigned int reg; + + status &= at91_adc_readl(st, AT91_ADC_IMR); + if (status & st->registers->drdy_mask) + handle_adc_eoc_trigger(irq, idev); + + if (status & AT91RL_ADC_IER_PEN) { + /* Disabling pen debounce is required to get a NOPEN irq */ + reg = at91_adc_readl(st, AT91_ADC_MR); + reg &= ~AT91_ADC_PENDBC; + at91_adc_writel(st, AT91_ADC_MR, reg); + + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN); + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_NOPEN + | AT91_ADC_EOC(3)); + /* Set up period trigger for sampling */ + at91_adc_writel(st, st->registers->trigger_register, + AT91_ADC_TRGR_MOD_PERIOD_TRIG | + AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val)); + } else if (status & AT91RL_ADC_IER_NOPEN) { + reg = at91_adc_readl(st, AT91_ADC_MR); + reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC; + at91_adc_writel(st, AT91_ADC_MR, reg); + at91_adc_writel(st, st->registers->trigger_register, + AT91_ADC_TRGR_NONE); + + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_NOPEN + | AT91_ADC_EOC(3)); + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN); + st->ts_bufferedmeasure = false; + input_report_key(st->ts_input, BTN_TOUCH, 0); + input_sync(st->ts_input); + } else if (status & AT91_ADC_EOC(3)) { + /* Conversion finished */ + if (st->ts_bufferedmeasure) { + /* + * Last measurement is always discarded, since it can + * be erroneous. 
+ * Always report previous measurement + */ + input_report_abs(st->ts_input, ABS_X, st->ts_prev_absx); + input_report_abs(st->ts_input, ABS_Y, st->ts_prev_absy); + input_report_key(st->ts_input, BTN_TOUCH, 1); + input_sync(st->ts_input); + } else + st->ts_bufferedmeasure = true; + + /* Now make new measurement */ + st->ts_prev_absx = at91_adc_readl(st, AT91_ADC_CHAN(st, 3)) + << MAX_RLPOS_BITS; + st->ts_prev_absx /= at91_adc_readl(st, AT91_ADC_CHAN(st, 2)); + + st->ts_prev_absy = at91_adc_readl(st, AT91_ADC_CHAN(st, 1)) + << MAX_RLPOS_BITS; + st->ts_prev_absy /= at91_adc_readl(st, AT91_ADC_CHAN(st, 0)); + } + + return IRQ_HANDLED; +} + +static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private) { struct iio_dev *idev = private; struct at91_adc_state *st = iio_priv(idev); @@ -672,6 +740,8 @@ static int at91_adc_probe_dt_ts(struct device_node *node, return -EINVAL; } + if (!st->caps->has_tsmr) + return 0; prop = 0; of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop); st->ts_pressure_threshold = prop; @@ -795,6 +865,7 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st, st->trigger_number = pdata->trigger_number; st->trigger_list = pdata->trigger_list; st->registers = &st->caps->registers; + st->touchscreen_type = pdata->touchscreen_type; return 0; } @@ -809,7 +880,10 @@ static int atmel_ts_open(struct input_dev *dev) { struct at91_adc_state *st = input_get_drvdata(dev); - at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN); + if (st->caps->has_tsmr) + at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN); + else + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN); return 0; } @@ -817,45 +891,61 @@ static void atmel_ts_close(struct input_dev *dev) { struct at91_adc_state *st = input_get_drvdata(dev); - at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN); + if (st->caps->has_tsmr) + at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN); + else + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN); } static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz) { - u32 reg = 0, pendbc; + u32 reg = 0; int i = 0; - if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE) - reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS; - else - reg = AT91_ADC_TSMR_TSMODE_5WIRE; - /* a Pen Detect Debounce Time is necessary for the ADC Touch to avoid * pen detect noise. * The formula is : Pen Detect Debounce Time = (2 ^ pendbc) / ADCClock */ - pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / 1000, 1); + st->ts_pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / + 1000, 1); - while (pendbc >> ++i) + while (st->ts_pendbc >> ++i) ; /* Empty! 
Find the shift offset */ - if (abs(pendbc - (1 << i)) < abs(pendbc - (1 << (i - 1)))) - pendbc = i; + if (abs(st->ts_pendbc - (1 << i)) < abs(st->ts_pendbc - (1 << (i - 1)))) + st->ts_pendbc = i; else - pendbc = i - 1; + st->ts_pendbc = i - 1; - if (st->caps->has_tsmr) { - reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average) - & AT91_ADC_TSMR_TSAV; - reg |= AT91_ADC_TSMR_PENDBC_(pendbc) & AT91_ADC_TSMR_PENDBC; - reg |= AT91_ADC_TSMR_NOTSDMA; - reg |= AT91_ADC_TSMR_PENDET_ENA; - reg |= 0x03 << 8; /* TSFREQ, need bigger than TSAV */ - - at91_adc_writel(st, AT91_ADC_TSMR, reg); - } else { - /* TODO: for 9g45 which has no TSMR */ + if (!st->caps->has_tsmr) { + reg = at91_adc_readl(st, AT91_ADC_MR); + reg |= AT91_ADC_TSAMOD_TS_ONLY_MODE | AT91_ADC_PENDET; + + reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC; + at91_adc_writel(st, AT91_ADC_MR, reg); + + reg = AT91_ADC_TSR_SHTIM_(TOUCH_SHTIM) & AT91_ADC_TSR_SHTIM; + at91_adc_writel(st, AT91_ADC_TSR, reg); + + st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US_RL * + adc_clk_khz / 1000) - 1, 1); + + return 0; } + if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE) + reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS; + else + reg = AT91_ADC_TSMR_TSMODE_5WIRE; + + reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average) + & AT91_ADC_TSMR_TSAV; + reg |= AT91_ADC_TSMR_PENDBC_(st->ts_pendbc) & AT91_ADC_TSMR_PENDBC; + reg |= AT91_ADC_TSMR_NOTSDMA; + reg |= AT91_ADC_TSMR_PENDET_ENA; + reg |= 0x03 << 8; /* TSFREQ, needs to be bigger than TSAV */ + + at91_adc_writel(st, AT91_ADC_TSMR, reg); + /* Change adc internal resistor value for better pen detection, * default value is 100 kOhm. * 0 = 200 kOhm, 1 = 150 kOhm, 2 = 100 kOhm, 3 = 50 kOhm @@ -864,7 +954,7 @@ static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz) at91_adc_writel(st, AT91_ADC_ACR, st->caps->ts_pen_detect_sensitivity & AT91_ADC_ACR_PENDETSENS); - /* Sample Peroid Time = (TRGPER + 1) / ADCClock */ + /* Sample Period Time = (TRGPER + 1) / ADCClock */ st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US * adc_clk_khz / 1000) - 1, 1); @@ -893,17 +983,37 @@ static int at91_ts_register(struct at91_adc_state *st, __set_bit(EV_ABS, input->evbit); __set_bit(EV_KEY, input->evbit); __set_bit(BTN_TOUCH, input->keybit); - input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, 0, 0); - input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, 0, 0); - input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0); + if (st->caps->has_tsmr) { + input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0); + } else { + if (st->touchscreen_type != ATMEL_ADC_TOUCHSCREEN_4WIRE) { + dev_err(&pdev->dev, + "This touchscreen controller only support 4 wires\n"); + ret = -EINVAL; + goto err; + } + + input_set_abs_params(input, ABS_X, 0, (1 << MAX_RLPOS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_Y, 0, (1 << MAX_RLPOS_BITS) - 1, + 0, 0); + } st->ts_input = input; input_set_drvdata(input, st); ret = input_register_device(input); if (ret) - input_free_device(st->ts_input); + goto err; + + return ret; +err: + input_free_device(st->ts_input); return ret; } @@ -962,11 +1072,13 @@ static int at91_adc_probe(struct platform_device *pdev) */ at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST); at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF); - ret = request_irq(st->irq, - at91_adc_interrupt, - 0, - pdev->dev.driver->name, - idev); 
+ + if (st->caps->has_tsmr) + ret = request_irq(st->irq, at91_adc_9x5_interrupt, 0, + pdev->dev.driver->name, idev); + else + ret = request_irq(st->irq, at91_adc_rl_interrupt, 0, + pdev->dev.driver->name, idev); if (ret) { dev_err(&pdev->dev, "Failed to allocate IRQ.\n"); return ret; @@ -1070,12 +1182,6 @@ static int at91_adc_probe(struct platform_device *pdev) goto error_disable_adc_clk; } } else { - if (!st->caps->has_tsmr) { - dev_err(&pdev->dev, "We don't support non-TSMR adc\n"); - ret = -ENODEV; - goto error_disable_adc_clk; - } - ret = at91_ts_register(st, pdev); if (ret) goto error_disable_adc_clk; diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h index fcf73879dbfe..7819fc787731 100644 --- a/include/linux/platform_data/at91_adc.h +++ b/include/linux/platform_data/at91_adc.h @@ -7,6 +7,12 @@ #ifndef _AT91_ADC_H_ #define _AT91_ADC_H_ +enum atmel_adc_ts_type { + ATMEL_ADC_TOUCHSCREEN_NONE = 0, + ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, + ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, +}; + /** * struct at91_adc_trigger - description of triggers * @name: name of the trigger advertised to the user @@ -28,6 +34,7 @@ struct at91_adc_trigger { * @trigger_number: Number of triggers available in the ADC * @use_external_triggers: does the board has external triggers availables * @vref: Reference voltage for the ADC in millivolts + * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires) */ struct at91_adc_data { unsigned long channels_used; @@ -36,6 +43,7 @@ struct at91_adc_data { u8 trigger_number; bool use_external_triggers; u16 vref; + enum atmel_adc_ts_type touchscreen_type; }; extern void __init at91_add_device_adc(struct at91_adc_data *data); -- cgit v1.2.3 From 03a3f53b965aaf1eb4f9423c1a55b41b3b4895b2 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Tue, 15 Apr 2014 12:28:09 +0200 Subject: ARM: at91: remove atmel_tsadcc platform_data Signed-off-by: Alexandre Belloni Signed-off-by: Nicolas Ferre --- arch/arm/mach-at91/board.h | 3 --- include/linux/platform_data/atmel.h | 7 ------- 2 files changed, 10 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-at91/board.h b/arch/arm/mach-at91/board.h index 6c08b341167d..4e773b55bc2d 100644 --- a/arch/arm/mach-at91/board.h +++ b/arch/arm/mach-at91/board.h @@ -118,9 +118,6 @@ struct isi_platform_data; extern void __init at91_add_device_isi(struct isi_platform_data *data, bool use_pck_as_mck); - /* Touchscreen Controller */ -extern void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data); - /* CAN */ extern void __init at91_add_device_can(struct at91_can_data *data); diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index e26b0c14edea..1466443797d7 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -87,13 +87,6 @@ struct atmel_uart_data { int rts_gpio; /* optional RTS GPIO */ }; - /* Touchscreen Controller */ -struct at91_tsadcc_data { - unsigned int adc_clock; - u8 pendet_debounce; - u8 ts_sample_hold_time; -}; - /* CAN */ struct at91_can_data { void (*transceiver_switch)(int on); -- cgit v1.2.3 From 69dd0f848879328ae6c6f54c2ec80e49eef042d8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 9 Apr 2014 14:30:10 +0200 Subject: sched/idle: Remove TS_POLLING support Now that there are no architectures left using it, kill the support for TS_POLLING. 
Signed-off-by: Peter Zijlstra Cc: Linus Torvalds Cc: Andy Lutomirski Link: http://lkml.kernel.org/n/tip-6yurip2tfix2f4bfc5agu2s0@git.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched.h | 46 ++-------------------------------------------- 1 file changed, 2 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index accb66bfd722..725eef121c9f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2775,51 +2775,9 @@ static inline int spin_needbreak(spinlock_t *lock) /* * Idle thread specific functions to determine the need_resched - * polling state. We have two versions, one based on TS_POLLING in - * thread_info.status and one based on TIF_POLLING_NRFLAG in - * thread_info.flags + * polling state. */ -#ifdef TS_POLLING -static inline int tsk_is_polling(struct task_struct *p) -{ - return task_thread_info(p)->status & TS_POLLING; -} -static inline void __current_set_polling(void) -{ - current_thread_info()->status |= TS_POLLING; -} - -static inline bool __must_check current_set_polling_and_test(void) -{ - __current_set_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_task() - */ - smp_mb(); - - return unlikely(tif_need_resched()); -} - -static inline void __current_clr_polling(void) -{ - current_thread_info()->status &= ~TS_POLLING; -} - -static inline bool __must_check current_clr_polling_and_test(void) -{ - __current_clr_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_task() - */ - smp_mb(); - - return unlikely(tif_need_resched()); -} -#elif defined(TIF_POLLING_NRFLAG) +#ifdef TIF_POLLING_NRFLAG static inline int tsk_is_polling(struct task_struct *p) { return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -- cgit v1.2.3 From 5eeaf1f1897372590105f155c6a7110b3fa36aef Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Wed, 7 May 2014 19:33:33 +0300 Subject: cpufreq: Fix build error on some platforms that use cpufreq_for_each_* On platforms that use cpufreq_for_each_* macros, build fails if CONFIG_CPU_FREQ=n, e.g. ARM/shmobile/koelsch/non-multiplatform: drivers/built-in.o: In function `clk_round_parent': clkdev.c:(.text+0xcf168): undefined reference to `cpufreq_next_valid' drivers/built-in.o: In function `clk_rate_table_find': clkdev.c:(.text+0xcf820): undefined reference to `cpufreq_next_valid' make[3]: *** [vmlinux] Error 1 Fix this making cpufreq_next_valid function inline and move it to cpufreq.h. Fixes: 27e289dce297 (cpufreq: Introduce macros for cpufreq_frequency_table iteration) Reported-and-tested-by: Geert Uytterhoeven Signed-off-by: Stratos Karafotis Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq.c | 11 ----------- include/linux/cpufreq.h | 11 +++++++++-- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bfe82b63875f..a05c92198b9f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -237,17 +237,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) -{ - while ((*pos)->frequency != CPUFREQ_TABLE_END) - if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID) - return true; - else - (*pos)++; - return false; -} -EXPORT_SYMBOL_GPL(cpufreq_next_valid); - /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9d803b529ac2..3f458896d45c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -489,8 +489,15 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, } #endif - -bool cpufreq_next_valid(struct cpufreq_frequency_table **pos); +static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) +{ + while ((*pos)->frequency != CPUFREQ_TABLE_END) + if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID) + return true; + else + (*pos)++; + return false; +} /* * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table -- cgit v1.2.3 From 9c9e321455fb806108f9dbb1872bacfd42c6002b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 8 May 2014 23:16:35 +0200 Subject: mfd: stmpe: add optional regulators The STMPE has VCC and VIO supply lines, and sometimes (as on Ux500) this comes from a software-controlled regulator. Make it possible to supply the STMPE with power from these regulators. 
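Board code that routes these rails through a controllable regulator only needs to export them under the "vcc"/"vio" consumer names; a minimal board-side sketch, in which the "0-0040" device name and the supply array are assumptions for illustration:

#include <linux/regulator/machine.h>

/*
 * Hypothetical board mapping: expose the parent regulator to the STMPE's
 * "vcc" and "vio" inputs so devm_regulator_get_optional() in stmpe_probe()
 * can find them.  "0-0040" stands for the STMPE's I2C device name on this
 * (assumed) board.
 */
static struct regulator_consumer_supply stmpe_supplies[] = {
	REGULATOR_SUPPLY("vcc", "0-0040"),
	REGULATOR_SUPPLY("vio", "0-0040"),
};

The array would then be attached to the parent regulator's init_data like any other consumer supply.
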
Signed-off-by: Linus Walleij --- drivers/mfd/stmpe.c | 18 ++++++++++++++++++ include/linux/mfd/stmpe.h | 5 +++++ 2 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 9fa2dd6d38bd..294731be1a15 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "stmpe.h" static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks) @@ -1186,6 +1187,18 @@ int stmpe_probe(struct stmpe_client_info *ci, int partnum) stmpe->variant = stmpe_variant_info[partnum]; stmpe->regs = stmpe->variant->regs; stmpe->num_gpios = stmpe->variant->num_gpios; + stmpe->vcc = devm_regulator_get_optional(ci->dev, "vcc"); + if (!IS_ERR(stmpe->vcc)) { + ret = regulator_enable(stmpe->vcc); + if (ret) + dev_warn(ci->dev, "failed to enable VCC supply\n"); + } + stmpe->vio = devm_regulator_get_optional(ci->dev, "vio"); + if (!IS_ERR(stmpe->vio)) { + ret = regulator_enable(stmpe->vio); + if (ret) + dev_warn(ci->dev, "failed to enable VIO supply\n"); + } dev_set_drvdata(stmpe->dev, stmpe); if (ci->init) @@ -1252,6 +1265,11 @@ int stmpe_probe(struct stmpe_client_info *ci, int partnum) int stmpe_remove(struct stmpe *stmpe) { + if (!IS_ERR(stmpe->vio)) + regulator_disable(stmpe->vio); + if (!IS_ERR(stmpe->vcc)) + regulator_disable(stmpe->vcc); + mfd_remove_devices(stmpe->dev); return 0; diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index 48395a69a7e9..980898620e57 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h @@ -11,6 +11,7 @@ #include struct device; +struct regulator; enum stmpe_block { STMPE_BLOCK_GPIO = 1 << 0, @@ -62,6 +63,8 @@ struct stmpe_client_info; /** * struct stmpe - STMPE MFD structure + * @vcc: optional VCC regulator + * @vio: optional VIO regulator * @lock: lock protecting I/O operations * @irq_lock: IRQ bus lock * @dev: device, mostly for dev_dbg() @@ -80,6 +83,8 @@ struct stmpe_client_info; * @pdata: platform data */ struct stmpe { + struct regulator *vcc; + struct regulator *vio; struct mutex lock; struct mutex irq_lock; struct device *dev; -- cgit v1.2.3 From 81c44c2b2ce358b1c5fe0065dc5d2e2010f39f1b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 24 Apr 2014 13:28:20 +0100 Subject: video/omap: fix modular build The framebuffer layer can be a loadable module, which forces omapfb to be a module as well. However, this breaks the lcd drivers, which are linked into the omapfb driver but each have their own module_init() function. To solve this, we split out the lcd drivers into separate modules and export omapfb_register_panel, which is the only interface required between the main omapfb driver and the lcd panel drivers. We also have to introduce a new Kconfig symbol for H3, since that lcd driver has a dependency on TPS65010, which we can express better in Kconfig than Makefile syntax. 
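With omapfb_register_panel() exported, a panel driver can live in its own module with its own init function; a rough sketch of what such a module looks like (panel name, fields and function names here are illustrative, not taken from this patch):

#include <linux/module.h>
#include <linux/init.h>

#include "omapfb.h"	/* struct lcd_panel, omapfb_register_panel() */

/*
 * Hypothetical standalone panel module: everything below except the
 * omapfb_register_panel() call itself is an illustrative assumption.
 */
static struct lcd_panel example_panel = {
	.name	= "example",
	/* timings, geometry and config omitted for brevity */
};

static int __init example_panel_init(void)
{
	omapfb_register_panel(&example_panel);
	return 0;
}
module_init(example_panel_init);

MODULE_LICENSE("GPL");
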
Signed-off-by: Arnd Bergmann Signed-off-by: Peter Griffin Cc: Jean-Christophe Plagniol-Villard Cc: Tomi Valkeinen Cc: linux-fbdev@vger.kernel.org Cc: linux-omap@vger.kernel.org Signed-off-by: Tomi Valkeinen --- drivers/video/fbdev/omap/Kconfig | 9 +++++++++ drivers/video/fbdev/omap/Makefile | 23 ++++++++++++----------- drivers/video/fbdev/omap/omapfb_main.c | 1 + include/linux/omap-dma.h | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/fbdev/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig index 0bc3a936ce2b..18c4cb0d5690 100644 --- a/drivers/video/fbdev/omap/Kconfig +++ b/drivers/video/fbdev/omap/Kconfig @@ -39,6 +39,15 @@ config FB_OMAP_LCD_MIPID the Mobile Industry Processor Interface DBI-C/DCS specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3) +config FB_OMAP_LCD_H3 + bool "TPS65010 LCD controller on OMAP-H3" + depends on MACH_OMAP_H3 + depends on TPS65010 + default y + help + Say Y here if you want to have support for the LCD on the + H3 board. + config FB_OMAP_DMA_TUNE bool "Set DMA SDRAM access priority high" depends on FB_OMAP diff --git a/drivers/video/fbdev/omap/Makefile b/drivers/video/fbdev/omap/Makefile index 1927faffb5bc..732e0718be53 100644 --- a/drivers/video/fbdev/omap/Makefile +++ b/drivers/video/fbdev/omap/Makefile @@ -10,17 +10,18 @@ objs-y$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o -objs-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o -objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o -objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o -objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o -objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o -objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o -objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o -objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o - -objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o -objs-y$(CONFIG_MACH_HERALD) += lcd_htcherald.o +lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o +lcds-y$(CONFIG_FB_OMAP_LCD_H3) += lcd_h3.o +lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o +lcds-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o +lcds-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o +lcds-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o +lcds-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o +lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o + +lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o +lcds-y$(CONFIG_MACH_HERALD) += lcd_htcherald.o omapfb-objs := $(objs-yy) +obj-$(CONFIG_FB_OMAP) += $(lcds-yy) diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index e4fc6d9b5371..d8d028d98711 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1823,6 +1823,7 @@ void omapfb_register_panel(struct lcd_panel *panel) if (fbdev_pdev != NULL) omapfb_do_probe(fbdev_pdev, fbdev_panel); } +EXPORT_SYMBOL_GPL(omapfb_register_panel); /* Called when the device is being detached from the driver */ static int omapfb_remove(struct platform_device *pdev) diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 41a13e70f41f..0a1a2e2d5c21 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -393,7 +393,7 @@ extern int omap_modify_dma_chain_params(int chain_id, extern int omap_dma_chain_status(int chain_id); #endif -#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_FB_OMAP) +#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) #include #else 
static inline int omap_lcd_dma_running(void) -- cgit v1.2.3 From 29a1f2333e07bbbecb920cc78fd035fe8f53207a Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 25 Apr 2014 17:10:06 +0200 Subject: gpio: Add helpers for optional GPIOs Introduce gpiod_get_optional() and gpiod_get_index_optional() helpers that make it easier for drivers to handle optional GPIOs. Currently in order to handle optional GPIOs, a driver needs to special case error handling for -ENOENT, such as this: gpio = gpiod_get(dev, "foo"); if (IS_ERR(gpio)) { if (PTR_ERR(gpio) != -ENOENT) return PTR_ERR(gpio); gpio = NULL; } if (gpio) { /* set up GPIO */ } With these new helpers the above is reduced to: gpio = gpiod_get_optional(dev, "foo"); if (IS_ERR(gpio)) return PTR_ERR(gpio); if (gpio) { /* set up GPIO */ } While at it, device-managed variants of these functions are also provided. Signed-off-by: Thierry Reding Reviewed-by: Alexandre Courbot Signed-off-by: Linus Walleij --- Documentation/driver-model/devres.txt | 2 ++ drivers/gpio/devres.c | 43 +++++++++++++++++++++++++++++++++++ drivers/gpio/gpiolib.c | 43 +++++++++++++++++++++++++++++++++++ include/linux/gpio/consumer.h | 40 ++++++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+) (limited to 'include/linux') diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 8ff1167cfedf..10b8c5d2c797 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -312,4 +312,6 @@ SPI GPIO devm_gpiod_get() devm_gpiod_get_index() + devm_gpiod_get_optional() + devm_gpiod_get_index_optional() devm_gpiod_put() diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 307464fd015f..65978cf85f79 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c @@ -51,6 +51,22 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, } EXPORT_SYMBOL(devm_gpiod_get); +/** + * devm_gpiod_get_optional - Resource-managed gpiod_get_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * + * Managed gpiod_get_optional(). GPIO descriptors returned from this function + * are automatically disposed on driver detach. See gpiod_get_optional() for + * detailed information about behavior and return values. + */ +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return devm_gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL(devm_gpiod_get_optional); + /** * devm_gpiod_get_index - Resource-managed gpiod_get_index() * @dev: GPIO consumer @@ -86,6 +102,33 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, } EXPORT_SYMBOL(devm_gpiod_get_index); +/** + * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * Managed gpiod_get_index_optional(). GPIO descriptors returned from this + * function are automatically disposed on driver detach. See + * gpiod_get_index_optional() for detailed information about behavior and + * return values. 
+ */ +struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = devm_gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL(devm_gpiod_get_index_optional); + /** * devm_gpiod_put - Resource-managed gpiod_put() * @desc: GPIO descriptor to dispose of diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4ad110e793c5..d9c9cb4665db 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2737,6 +2737,22 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id) } EXPORT_SYMBOL_GPL(gpiod_get); +/** + * gpiod_get_optional - obtain an optional GPIO for a given GPIO function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * + * This is equivalent to gpiod_get(), except that when no GPIO was assigned to + * the requested function it will return NULL. This is convenient for drivers + * that need to handle optional GPIOs. + */ +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL_GPL(gpiod_get_optional); + /** * gpiod_get_index - obtain a GPIO from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs @@ -2799,6 +2815,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, } EXPORT_SYMBOL_GPL(gpiod_get_index); +/** + * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO + * function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * This is equivalent to gpiod_get_index(), except that when no GPIO with the + * specified index was assigned to the requested function it will return NULL. + * This is convenient for drivers that need to handle optional GPIOs. 
+ */ +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL_GPL(gpiod_get_index_optional); + /** * gpiod_put - dispose of a GPIO descriptor * @desc: GPIO descriptor to dispose of diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index bed128e8f4b1..6a37ef0dc59c 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -23,6 +23,12 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx); +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id); +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index); + void gpiod_put(struct gpio_desc *desc); struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, @@ -30,6 +36,12 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx); +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id); +struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index); + void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); int gpiod_get_direction(const struct gpio_desc *desc); @@ -73,6 +85,20 @@ static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev, { return ERR_PTR(-ENOSYS); } + +static inline struct gpio_desc *__must_check +gpiod_get_optional(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index) +{ + return ERR_PTR(-ENOSYS); +} + static inline void gpiod_put(struct gpio_desc *desc) { might_sleep(); @@ -93,6 +119,20 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, { return ERR_PTR(-ENOSYS); } + +static inline struct gpio_desc *__must_check +devm_gpiod_get_optional(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index) +{ + return ERR_PTR(-ENOSYS); +} + static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) { might_sleep(); -- cgit v1.2.3 From af76e555e5e29e08eb8ac1f7878e23dbf0d6741f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 6 May 2014 12:12:45 +0200 Subject: blk-mq: initialize struct request fields individually This allows us to avoid a non-atomic memset over ->atomic_flags as well as killing lots of duplicate initializations. 
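To illustrate the issue (a minimal, hypothetical userspace-style sketch, not code from this patch): a struct-wide memset() also clears the atomic flags word with a plain non-atomic store, racing against concurrent set_bit()/clear_bit() users, while per-field initialization can simply skip that member:

  #include <string.h>

  struct fake_request {
          unsigned long atomic_flags;     /* updated with atomic bitops elsewhere */
          int tag;
          int errors;
          void *bio;
  };

  /* Unsafe: wipes atomic_flags non-atomically along with everything else. */
  void init_by_memset(struct fake_request *rq, int tag)
  {
          memset(rq, 0, sizeof(*rq));
          rq->tag = tag;
  }

  /* Pattern used by this patch: initialize each field, leave atomic_flags alone. */
  void init_field_by_field(struct fake_request *rq, int tag)
  {
          rq->tag = tag;
          rq->errors = 0;
          rq->bio = NULL;
          /* rq->atomic_flags deliberately untouched */
  }
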
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- include/linux/blkdev.h | 7 ++++--- 2 files changed, 49 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 3fdb097ebe5e..492f49f96459 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -82,9 +82,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; - blk_rq_init(hctx->queue, rq); rq->tag = tag; - return rq; } @@ -187,10 +185,54 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; + INIT_LIST_HEAD(&rq->queuelist); + /* csd/requeue_work/fifo_time is initialized before use */ + rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; + rq->cmd_type = 0; + /* do not touch atomic flags, it needs atomic ops against the timer */ + rq->cpu = -1; + rq->__data_len = 0; + rq->__sector = (sector_t) -1; + rq->bio = NULL; + rq->biotail = NULL; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv))); + rq->rq_disk = NULL; + rq->part = NULL; rq->start_time = jiffies; +#ifdef CONFIG_BLK_CGROUP + rq->rl = NULL; set_start_time_ns(rq); + rq->io_start_time_ns = 0; +#endif + rq->nr_phys_segments = 0; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + rq->nr_integrity_segments = 0; +#endif + rq->ioprio = 0; + rq->special = NULL; + /* tag was already set */ + rq->errors = 0; + memset(rq->__cmd, 0, sizeof(rq->__cmd)); + rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; + + rq->extra_len = 0; + rq->sense_len = 0; + rq->resid_len = 0; + rq->sense = NULL; + + rq->deadline = 0; + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + rq->retries = 0; + rq->end_io = NULL; + rq->end_io_data = NULL; + rq->next_rq = NULL; + ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } @@ -258,6 +300,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); blk_mq_put_tag(hctx->tags, tag); blk_mq_queue_exit(q); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 20b26d4e53a2..94b27210641b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -90,9 +90,10 @@ enum rq_cmd_type_bits { #define BLK_MAX_CDB 16 /* - * try to put the fields that are referenced together in the same cacheline. - * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() - * as well! + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. */ struct request { struct list_head queuelist; -- cgit v1.2.3 From 4bb659b156996f2993dc16fad71fec9ee070153c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2014 09:36:49 -0600 Subject: blk-mq: implement new and more efficient tagging scheme blk-mq currently uses percpu_ida for tag allocation. But that only works well if the ratio between tag space and number of CPUs is sufficiently high. For most devices and systems, that is not the case. The end result if that we either only utilize the tag space partially, or we end up attempting to fully exhaust it and run into lots of lock contention with stealing between CPUs. This is not optimal. 
This new tagging scheme is a hybrid bitmap allocator. It uses two tricks to both be SMP friendly and allow full exhaustion of the space: 1) We cache the last allocated (or freed) tag on a per blk-mq software context basis. This allows us to limit the space we have to search. The key element here is not caching it in the shared tag structure, otherwise we end up dirtying more shared cache lines on each allocate/free operation. 2) The tag space is split into cache line sized groups, and each context will start off randomly in that space. Even up to full utilization of the space, this divides the tag users efficiently into cache line groups, avoiding dirtying the same one both between allocators and between allocator and freeer. This scheme shows drastically better behaviour, both on small tag spaces but on large ones as well. It has been tested extensively to show better performance for all the cases blk-mq cares about. Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 415 ++++++++++++++++++++++++++++++++++++++++--------- block/blk-mq-tag.h | 42 ++++- block/blk-mq.c | 23 ++- block/blk-mq.h | 4 +- include/linux/blk-mq.h | 6 +- 5 files changed, 391 insertions(+), 99 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 1f43d6ee956f..467f3a20b355 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,64 +1,257 @@ #include #include +#include #include #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved) +void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, + bool reserved) { - int tag = blk_mq_get_tag(tags, __GFP_WAIT, reserved); - blk_mq_put_tag(tags, tag); + int tag, zero = 0; + + tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved); + blk_mq_put_tag(tags, tag, &zero); +} + +static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) +{ + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + int ret; + + ret = find_first_zero_bit(&bm->word, bm->depth); + if (ret < bm->depth) + return true; + } + + return false; } bool blk_mq_has_free_tags(struct blk_mq_tags *tags) { - return !tags || - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0; + if (!tags) + return true; + + return bt_has_free_tags(&tags->bitmap_tags); +} + +static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) +{ + int tag, org_last_tag, end; + + org_last_tag = last_tag = TAG_TO_BIT(last_tag); + end = bm->depth; + do { +restart: + tag = find_next_zero_bit(&bm->word, end, last_tag); + if (unlikely(tag >= end)) { + /* + * We started with an offset, start from 0 to + * exhaust the map. + */ + if (org_last_tag && last_tag) { + end = last_tag; + last_tag = 0; + goto restart; + } + return -1; + } + last_tag = tag + 1; + } while (test_and_set_bit_lock(tag, &bm->word)); + + return tag; +} + +/* + * Straight forward bitmap tag implementation, where each bit is a tag + * (cleared == free, and set == busy). The small twist is using per-cpu + * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue + * contexts. This enables us to drastically limit the space searched, + * without dirtying an extra shared cacheline like we would if we stored + * the cache value inside the shared blk_mq_bitmap_tags structure. On top + * of that, each word of tags is in a separate cacheline. This means that + * multiple users will tend to stick to different cachelines, at least + * until the map is exhausted. 
+ */ +static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache) +{ + unsigned int last_tag, org_last_tag; + int index, i, tag; + + last_tag = org_last_tag = *tag_cache; + index = TAG_TO_INDEX(last_tag); + + for (i = 0; i < bt->map_nr; i++) { + tag = __bt_get_word(&bt->map[index], last_tag); + if (tag != -1) { + tag += index * BITS_PER_LONG; + goto done; + } + + last_tag = 0; + if (++index >= bt->map_nr) + index = 0; + } + + *tag_cache = 0; + return -1; + + /* + * Only update the cache from the allocation path, if we ended + * up using the specific cached tag. + */ +done: + if (tag == org_last_tag) { + last_tag = tag + 1; + if (last_tag >= bt->depth - 1) + last_tag = 0; + + *tag_cache = last_tag; + } + + return tag; +} + +static inline void bt_index_inc(unsigned int *index) +{ + *index = (*index + 1) & (BT_WAIT_QUEUES - 1); +} + +static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, + struct blk_mq_hw_ctx *hctx) +{ + struct bt_wait_state *bs; + + if (!hctx) + return &bt->bs[0]; + + bs = &bt->bs[hctx->wait_index]; + bt_index_inc(&hctx->wait_index); + return bs; } -static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) +static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) { + struct bt_wait_state *bs; + DEFINE_WAIT(wait); int tag; - tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? - TASK_UNINTERRUPTIBLE : TASK_RUNNING); - if (tag < 0) - return BLK_MQ_TAG_FAIL; - return tag + tags->nr_reserved_tags; + tag = __bt_get(bt, last_tag); + if (tag != -1) + return tag; + + if (!(gfp & __GFP_WAIT)) + return -1; + + bs = bt_wait_ptr(bt, hctx); + do { + bool was_empty; + + was_empty = list_empty(&wait.task_list); + prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); + + tag = __bt_get(bt, last_tag); + if (tag != -1) + break; + + if (was_empty) + atomic_set(&bs->wait_cnt, bt->wake_cnt); + + io_schedule(); + } while (1); + + finish_wait(&bs->wait, &wait); + return tag; +} + +static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, + struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) +{ + int tag; + + tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp); + if (tag >= 0) + return tag + tags->nr_reserved_tags; + + return BLK_MQ_TAG_FAIL; } static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, gfp_t gfp) { - int tag; + int tag, zero = 0; if (unlikely(!tags->nr_reserved_tags)) { WARN_ON_ONCE(1); return BLK_MQ_TAG_FAIL; } - tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 
- TASK_UNINTERRUPTIBLE : TASK_RUNNING); + tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp); if (tag < 0) return BLK_MQ_TAG_FAIL; + return tag; } -unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) +unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, + struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, + gfp_t gfp, bool reserved) { if (!reserved) - return __blk_mq_get_tag(tags, gfp); + return __blk_mq_get_tag(tags, hctx, last_tag, gfp); return __blk_mq_get_reserved_tag(tags, gfp); } +static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) +{ + int i, wake_index; + + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) { + if (wake_index != bt->wake_index) + bt->wake_index = wake_index; + + return bs; + } + + bt_index_inc(&wake_index); + } + + return NULL; +} + +static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) +{ + const int index = TAG_TO_INDEX(tag); + struct bt_wait_state *bs; + + clear_bit(TAG_TO_BIT(tag), &bt->map[index].word); + + bs = bt_wake_ptr(bt); + if (bs && atomic_dec_and_test(&bs->wait_cnt)) { + smp_mb__after_clear_bit(); + atomic_set(&bs->wait_cnt, bt->wake_cnt); + bt_index_inc(&bt->wake_index); + wake_up(&bs->wait); + } +} + static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) { BUG_ON(tag >= tags->nr_tags); - percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags); + bt_clear_tag(&tags->bitmap_tags, tag); } static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, @@ -66,22 +259,41 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, { BUG_ON(tag >= tags->nr_reserved_tags); - percpu_ida_free(&tags->reserved_tags, tag); + bt_clear_tag(&tags->breserved_tags, tag); } -void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) +void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, + unsigned int *last_tag) { - if (tag >= tags->nr_reserved_tags) - __blk_mq_put_tag(tags, tag); - else + if (tag >= tags->nr_reserved_tags) { + const int real_tag = tag - tags->nr_reserved_tags; + + __blk_mq_put_tag(tags, real_tag); + *last_tag = real_tag; + } else __blk_mq_put_reserved_tag(tags, tag); } -static int __blk_mq_tag_iter(unsigned id, void *data) +static void bt_for_each_free(struct blk_mq_bitmap_tags *bt, + unsigned long *free_map, unsigned int off) { - unsigned long *tag_map = data; - __set_bit(id, tag_map); - return 0; + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + int bit = 0; + + do { + bit = find_next_zero_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + __set_bit(bit + off, free_map); + bit++; + } while (1); + + off += BITS_PER_LONG; + } } void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, @@ -95,21 +307,98 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, if (!tag_map) return; - percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map); + bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags); if (tags->nr_reserved_tags) - percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter, - tag_map); + bt_for_each_free(&tags->breserved_tags, tag_map, 0); fn(data, tag_map); kfree(tag_map); } +static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) +{ + unsigned int i, used; + + for (i = 0, used = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + + used += bitmap_weight(&bm->word, bm->depth); + } + + return bt->depth - 
used; +} + +static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, + int node, bool reserved) +{ + int i; + + /* + * Depth can be zero for reserved tags, that's not a failure + * condition. + */ + if (depth) { + int nr, i, map_depth; + + nr = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; + bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap), + GFP_KERNEL, node); + if (!bt->map) + return -ENOMEM; + + bt->map_nr = nr; + map_depth = depth; + for (i = 0; i < nr; i++) { + bt->map[i].depth = min(map_depth, BITS_PER_LONG); + map_depth -= BITS_PER_LONG; + } + } + + bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); + if (!bt->bs) { + kfree(bt->map); + return -ENOMEM; + } + + for (i = 0; i < BT_WAIT_QUEUES; i++) + init_waitqueue_head(&bt->bs[i].wait); + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / 4) + bt->wake_cnt = max(1U, depth / 4); + + bt->depth = depth; + return 0; +} + +static void bt_free(struct blk_mq_bitmap_tags *bt) +{ + kfree(bt->map); + kfree(bt->bs); +} + +static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + if (bt_alloc(&tags->bitmap_tags, depth, node, false)) + goto enomem; + if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true)) + goto enomem; + + return tags; +enomem: + bt_free(&tags->bitmap_tags); + kfree(tags); + return NULL; +} + struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node) { unsigned int nr_tags, nr_cache; struct blk_mq_tags *tags; - int ret; if (total_tags > BLK_MQ_TAG_MAX) { pr_err("blk-mq: tag depth too large\n"); @@ -121,72 +410,46 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, return NULL; nr_tags = total_tags - reserved_tags; - nr_cache = nr_tags / num_possible_cpus(); - - if (nr_cache < BLK_MQ_TAG_CACHE_MIN) - nr_cache = BLK_MQ_TAG_CACHE_MIN; - else if (nr_cache > BLK_MQ_TAG_CACHE_MAX) - nr_cache = BLK_MQ_TAG_CACHE_MAX; + nr_cache = nr_tags / num_online_cpus(); tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; - tags->nr_max_cache = nr_cache; - tags->nr_batch_move = max(1u, nr_cache / 2); - - ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags - - tags->nr_reserved_tags, - tags->nr_max_cache, - tags->nr_batch_move); - if (ret) - goto err_free_tags; - - if (reserved_tags) { - /* - * With max_cahe and batch set to 1, the allocator fallbacks to - * no cached. It's fine reserved tags allocation is slow. 
- */ - ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags, - 1, 1); - if (ret) - goto err_reserved_tags; - } - return tags; - -err_reserved_tags: - percpu_ida_destroy(&tags->free_tags); -err_free_tags: - kfree(tags); - return NULL; + return blk_mq_init_bitmap_tags(tags, node); } void blk_mq_free_tags(struct blk_mq_tags *tags) { - percpu_ida_destroy(&tags->free_tags); - percpu_ida_destroy(&tags->reserved_tags); + bt_free(&tags->bitmap_tags); + bt_free(&tags->breserved_tags); kfree(tags); } +void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + if (depth > 1) + *tag = prandom_u32() % (depth - 1); + else + *tag = 0; +} + ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; - unsigned int cpu; + unsigned int free, res; if (!tags) return 0; - page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," - " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags, - tags->nr_batch_move, tags->nr_max_cache); + page += sprintf(page, "nr_tags=%u, reserved_tags=%u\n", + tags->nr_tags, tags->nr_reserved_tags); - page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids), - percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids)); + free = bt_unused_tags(&tags->bitmap_tags); + res = bt_unused_tags(&tags->breserved_tags); - for_each_possible_cpu(cpu) { - page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, - percpu_ida_free_tags(&tags->free_tags, cpu)); - } + page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); return page - orig_page; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index c8e0645ea331..06d4a2f0f7a0 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,7 +1,34 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -#include +enum { + BT_WAIT_QUEUES = 8, + BT_WAIT_BATCH = 8, +}; + +struct bt_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; +} ____cacheline_aligned_in_smp; + +#define TAG_TO_INDEX(tag) ((tag) / BITS_PER_LONG) +#define TAG_TO_BIT(tag) ((tag) & (BITS_PER_LONG - 1)) + +struct blk_mq_bitmap { + unsigned long word; + unsigned long depth; +} ____cacheline_aligned_in_smp; + +struct blk_mq_bitmap_tags { + unsigned int depth; + unsigned int wake_cnt; + + struct blk_mq_bitmap *map; + unsigned int map_nr; + + unsigned int wake_index; + struct bt_wait_state *bs; +}; /* * Tag address space map. 
@@ -9,11 +36,9 @@ struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; - unsigned int nr_batch_move; - unsigned int nr_max_cache; - struct percpu_ida free_tags; - struct percpu_ida reserved_tags; + struct blk_mq_bitmap_tags bitmap_tags; + struct blk_mq_bitmap_tags breserved_tags; struct request **rqs; struct list_head page_list; @@ -23,12 +48,13 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag); +extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); +extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, bool reserved); +extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, unsigned int *last_tag); extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); +extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index 492f49f96459..9f07a266f7ab 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -74,12 +74,13 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, } static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, gfp_t gfp, bool reserved) { struct request *rq; unsigned int tag; - tag = blk_mq_get_tag(hctx->tags, gfp, reserved); + tag = blk_mq_get_tag(hctx->tags, hctx, &ctx->last_tag, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; rq->tag = tag; @@ -246,7 +247,8 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); + rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT, + reserved); if (rq) { blk_mq_rq_ctx_init(q, ctx, rq, rw); break; @@ -260,7 +262,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_wait_for_tags(hctx->tags, reserved); + blk_mq_wait_for_tags(hctx->tags, hctx, reserved); } while (1); return rq; @@ -278,6 +280,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) blk_mq_put_ctx(rq->mq_ctx); return rq; } +EXPORT_SYMBOL(blk_mq_alloc_request); struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp) @@ -301,7 +304,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct request_queue *q = rq->q; clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); - blk_mq_put_tag(hctx->tags, tag); + blk_mq_put_tag(hctx->tags, tag, &ctx->last_tag); blk_mq_queue_exit(q); } @@ -677,11 +680,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) queued++; continue; case BLK_MQ_RQ_QUEUE_BUSY: - /* - * FIXME: we should have a mechanism to stop the queue - * like blk_stop_queue, otherwise we will waste cpu - * time - */ list_add(&rq->queuelist, &rq_list); 
__blk_mq_requeue_request(rq); break; @@ -873,6 +871,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, list_add(&rq->queuelist, &ctx->rq_list); else list_add_tail(&rq->queuelist, &ctx->rq_list); + blk_mq_hctx_mark_pending(hctx, ctx); /* @@ -1046,7 +1045,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) if (is_sync) rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); - rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); + rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false); if (likely(rq)) blk_mq_rq_ctx_init(q, ctx, rq, rw); else { @@ -1130,8 +1129,8 @@ EXPORT_SYMBOL(blk_mq_map_queue); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, unsigned int hctx_index) { - return kmalloc_node(sizeof(struct blk_mq_hw_ctx), - GFP_KERNEL | __GFP_ZERO, set->numa_node); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, + set->numa_node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); diff --git a/block/blk-mq.h b/block/blk-mq.h index 1ae364ceaf8b..97cfab9c092f 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -12,6 +12,8 @@ struct blk_mq_ctx { unsigned int cpu; unsigned int index_hw; + unsigned int last_tag ____cacheline_aligned_in_smp; + /* incremented at dispatch time */ unsigned long rq_dispatched[2]; unsigned long rq_merged; @@ -21,7 +23,7 @@ struct blk_mq_ctx { struct request_queue *queue; struct kobject kobj; -}; +} ____cacheline_aligned_in_smp; void __blk_mq_complete_request(struct request *rq); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 5bd677e2dcb7..f83d15f6e1c1 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -31,10 +31,12 @@ struct blk_mq_hw_ctx { void *driver_data; - unsigned int nr_ctx; - struct blk_mq_ctx **ctxs; unsigned int nr_ctx_map; unsigned long *ctx_map; + unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + + unsigned int wait_index; struct blk_mq_tags *tags; -- cgit v1.2.3 From 4593df29b94b31de931dc20d7da2e6c468c8d473 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Fri, 21 Mar 2014 10:13:05 +0100 Subject: mmc: mmci: Enforce DT for signal direction and feedback clock Remove the option to provide signal direction configuration and feeback clock as platform data, enforce it through DT. Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 34 ++++++++++++++-------------------- drivers/mmc/host/mmci.h | 11 +++++++++++ include/linux/amba/mmci.h | 16 ---------------- 3 files changed, 25 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 0d3ee08662a9..c0353f84d5be 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1287,7 +1287,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * indicating signal direction for the signals in * the SD/MMC bus and feedback-clock usage. 
*/ - pwr |= host->plat->sigdir; + pwr |= host->pwr_reg_add; if (ios->bus_width == MMC_BUS_WIDTH_4) pwr &= ~MCI_ST_DATA74DIREN; @@ -1386,29 +1386,26 @@ static struct mmc_host_ops mmci_ops = { .start_signal_voltage_switch = mmci_sig_volt_switch, }; -static void mmci_dt_populate_generic_pdata(struct device_node *np, - struct mmci_platform_data *pdata) +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) { + struct mmci_host *host = mmc_priv(mmc); + int ret = mmc_of_parse(mmc); + + if (ret) + return ret; + if (of_get_property(np, "st,sig-dir-dat0", NULL)) - pdata->sigdir |= MCI_ST_DATA0DIREN; + host->pwr_reg_add |= MCI_ST_DATA0DIREN; if (of_get_property(np, "st,sig-dir-dat2", NULL)) - pdata->sigdir |= MCI_ST_DATA2DIREN; + host->pwr_reg_add |= MCI_ST_DATA2DIREN; if (of_get_property(np, "st,sig-dir-dat31", NULL)) - pdata->sigdir |= MCI_ST_DATA31DIREN; + host->pwr_reg_add |= MCI_ST_DATA31DIREN; if (of_get_property(np, "st,sig-dir-dat74", NULL)) - pdata->sigdir |= MCI_ST_DATA74DIREN; + host->pwr_reg_add |= MCI_ST_DATA74DIREN; if (of_get_property(np, "st,sig-dir-cmd", NULL)) - pdata->sigdir |= MCI_ST_CMDDIREN; + host->pwr_reg_add |= MCI_ST_CMDDIREN; if (of_get_property(np, "st,sig-pin-fbclk", NULL)) - pdata->sigdir |= MCI_ST_FBCLKEN; -} - -static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) -{ - int ret = mmc_of_parse(mmc); - - if (ret) - return ret; + host->pwr_reg_add |= MCI_ST_FBCLKEN; if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) mmc->caps |= MMC_CAP_MMC_HIGHSPEED; @@ -1440,9 +1437,6 @@ static int mmci_probe(struct amba_device *dev, return -ENOMEM; } - if (np) - mmci_dt_populate_generic_pdata(np, plat); - mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); if (!mmc) return -ENOMEM; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 8fc5814f938a..347d942d740b 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -13,6 +13,16 @@ #define MCI_PWR_ON 0x03 #define MCI_OD (1 << 6) #define MCI_ROD (1 << 7) +/* + * The ST Micro version does not have ROD and reuse the voltage registers for + * direction settings. + */ +#define MCI_ST_DATA2DIREN (1 << 2) +#define MCI_ST_CMDDIREN (1 << 3) +#define MCI_ST_DATA0DIREN (1 << 4) +#define MCI_ST_DATA31DIREN (1 << 5) +#define MCI_ST_FBCLKEN (1 << 7) +#define MCI_ST_DATA74DIREN (1 << 8) #define MMCICLOCK 0x004 #define MCI_CLK_ENABLE (1 << 8) @@ -183,6 +193,7 @@ struct mmci_host { unsigned int mclk; unsigned int cclk; u32 pwr_reg; + u32 pwr_reg_add; u32 clk_reg; u32 datactrl_reg; u32 busy_status; diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 32a89cf5ec45..0d3ff95b3b4c 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -6,19 +6,6 @@ #include - -/* - * These defines is places here due to access is needed from machine - * configuration files. The ST Micro version does not have ROD and - * reuse the voltage registers for direction settings. 
- */ -#define MCI_ST_DATA2DIREN (1 << 2) -#define MCI_ST_CMDDIREN (1 << 3) -#define MCI_ST_DATA0DIREN (1 << 4) -#define MCI_ST_DATA31DIREN (1 << 5) -#define MCI_ST_FBCLKEN (1 << 7) -#define MCI_ST_DATA74DIREN (1 << 8) - /* Just some dummy forwarding */ struct dma_chan; @@ -45,8 +32,6 @@ struct dma_chan; * @capabilities: the capabilities of the block as implemented in * this platform, signify anything MMC_CAP_* from mmc/host.h * @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h - * @sigdir: a bit field indicating for what bits in the MMC bus the host - * should enable signal direction indication. * @dma_filter: function used to select an appropriate RX and TX * DMA channel to be used for DMA, if and only if you're deploying the * generic DMA engine @@ -69,7 +54,6 @@ struct mmci_platform_data { bool cd_invert; unsigned long capabilities; unsigned long capabilities2; - u32 sigdir; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); void *dma_rx_param; void *dma_tx_param; -- cgit v1.2.3 From 3faf80dfa342e98b5780e0b78b7a670c7b61a9be Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Fri, 21 Mar 2014 10:29:10 +0100 Subject: mmc: mmci: Enforce mmc capabilities through DT Remove the option to provide the flags for mmc capabilities as platform data, enforce it through DT. Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 2 -- include/linux/amba/mmci.h | 5 ----- 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index c0353f84d5be..9c60325f1a30 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1519,8 +1519,6 @@ static int mmci_probe(struct amba_device *dev, dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); /* DT takes precedence over platform data. */ - mmc->caps = np ? mmc->caps : plat->capabilities; - mmc->caps2 = np ? mmc->caps2 : plat->capabilities2; if (!np) { if (!plat->cd_invert) mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 0d3ff95b3b4c..b992fc931295 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -29,9 +29,6 @@ struct dma_chan; * @gpio_wp: read this GPIO pin to see if the card is write protected * @gpio_cd: read this GPIO pin to detect card insertion * @cd_invert: true if the gpio_cd pin value is active low - * @capabilities: the capabilities of the block as implemented in - * this platform, signify anything MMC_CAP_* from mmc/host.h - * @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h * @dma_filter: function used to select an appropriate RX and TX * DMA channel to be used for DMA, if and only if you're deploying the * generic DMA engine @@ -52,8 +49,6 @@ struct mmci_platform_data { int gpio_wp; int gpio_cd; bool cd_invert; - unsigned long capabilities; - unsigned long capabilities2; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); void *dma_rx_param; void *dma_tx_param; -- cgit v1.2.3 From 725b418b43d2ddcb94b413cd25c74c1175d1c5f0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 22 Apr 2014 15:11:41 +0200 Subject: clk: Fixup spacing in comments - Remove spaces in front of TABs, - Correct indentation for some CLK_* flag descriptions. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Mike Turquette --- include/linux/clk-provider.h | 88 ++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 59e2eb58f555..397f98505bd4 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -40,14 +40,14 @@ struct dentry; * through the clk_* api. * * @prepare: Prepare the clock for enabling. This must not return until - * the clock is fully prepared, and it's safe to call clk_enable. - * This callback is intended to allow clock implementations to - * do any initialisation that may sleep. Called with - * prepare_lock held. + * the clock is fully prepared, and it's safe to call clk_enable. + * This callback is intended to allow clock implementations to + * do any initialisation that may sleep. Called with + * prepare_lock held. * * @unprepare: Release the clock from its prepared state. This will typically - * undo any work done in the @prepare callback. Called with - * prepare_lock held. + * undo any work done in the @prepare callback. Called with + * prepare_lock held. * * @is_prepared: Queries the hardware to determine if the clock is prepared. * This function is allowed to sleep. Optional, if this op is not @@ -58,16 +58,16 @@ struct dentry; * Called with prepare mutex held. This function may sleep. * * @enable: Enable the clock atomically. This must not return until the - * clock is generating a valid clock signal, usable by consumer - * devices. Called with enable_lock held. This function must not - * sleep. + * clock is generating a valid clock signal, usable by consumer + * devices. Called with enable_lock held. This function must not + * sleep. * * @disable: Disable the clock atomically. Called with enable_lock held. - * This function must not sleep. + * This function must not sleep. * * @is_enabled: Queries the hardware to determine if the clock is enabled. - * This function must not sleep. Optional, if this op is not - * set then the enable count will be used. + * This function must not sleep. Optional, if this op is not + * set then the enable count will be used. * * @disable_unused: Disable the clock atomically. Only called from * clk_disable_unused for gate clocks with special needs. @@ -75,34 +75,34 @@ struct dentry; * sleep. * * @recalc_rate Recalculate the rate of this clock, by querying hardware. The - * parent rate is an input parameter. It is up to the caller to - * ensure that the prepare_mutex is held across this call. - * Returns the calculated rate. Optional, but recommended - if - * this op is not set then clock rate will be initialized to 0. + * parent rate is an input parameter. It is up to the caller to + * ensure that the prepare_mutex is held across this call. + * Returns the calculated rate. Optional, but recommended - if + * this op is not set then clock rate will be initialized to 0. * * @round_rate: Given a target rate as input, returns the closest rate actually - * supported by the clock. + * supported by the clock. * * @determine_rate: Given a target rate as input, returns the closest rate * actually supported by the clock, and optionally the parent clock * that should be used to provide the clock rate. * * @get_parent: Queries the hardware to determine the parent of a clock. The - * return value is a u8 which specifies the index corresponding to - * the parent clock. 
This index can be applied to either the - * .parent_names or .parents arrays. In short, this function - * translates the parent value read from hardware into an array - * index. Currently only called when the clock is initialized by - * __clk_init. This callback is mandatory for clocks with - * multiple parents. It is optional (and unnecessary) for clocks - * with 0 or 1 parents. + * return value is a u8 which specifies the index corresponding to + * the parent clock. This index can be applied to either the + * .parent_names or .parents arrays. In short, this function + * translates the parent value read from hardware into an array + * index. Currently only called when the clock is initialized by + * __clk_init. This callback is mandatory for clocks with + * multiple parents. It is optional (and unnecessary) for clocks + * with 0 or 1 parents. * * @set_parent: Change the input source of this clock; for clocks with multiple - * possible parents specify a new parent by passing in the index - * as a u8 corresponding to the parent in either the .parent_names - * or .parents arrays. This function in affect translates an - * array index into the value programmed into the hardware. - * Returns 0 on success, -EERROR otherwise. + * possible parents specify a new parent by passing in the index + * as a u8 corresponding to the parent in either the .parent_names + * or .parents arrays. This function in affect translates an + * array index into the value programmed into the hardware. + * Returns 0 on success, -EERROR otherwise. * * @set_rate: Change the rate of this clock. The requested rate is specified * by the second argument, which should typically be the return @@ -254,12 +254,12 @@ void of_fixed_clk_setup(struct device_node *np); * * Flags: * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to - * enable the clock. Setting this flag does the opposite: setting the bit - * disable the clock and clearing it enables the clock + * enable the clock. Setting this flag does the opposite: setting the bit + * disable the clock and clearing it enables the clock * CLK_GATE_HIWORD_MASK - The gate settings are only in lower 16-bit - * of this register, and mask of gate bits are in higher 16-bit of this - * register. While setting the gate bits, higher 16-bit should also be - * updated to indicate changing gate bits. + * of this register, and mask of gate bits are in higher 16-bit of this + * register. While setting the gate bits, higher 16-bit should also be + * updated to indicate changing gate bits. */ struct clk_gate { struct clk_hw hw; @@ -298,20 +298,20 @@ struct clk_div_table { * * Flags: * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the - * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is - * the raw value read from the register, with the value of zero considered + * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is + * the raw value read from the register, with the value of zero considered * invalid, unless CLK_DIVIDER_ALLOW_ZERO is set. * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from - * the hardware register + * the hardware register * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have * CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor. * Some hardware implementations gracefully handle this case and allow a * zero divisor by not modifying their input clock * (divide by one / bypass). 
* CLK_DIVIDER_HIWORD_MASK - The divider settings are only in lower 16-bit - * of this register, and mask of divider bits are in higher 16-bit of this - * register. While setting the divider bits, higher 16-bit should also be - * updated to indicate changing divider bits. + * of this register, and mask of divider bits are in higher 16-bit of this + * register. While setting the divider bits, higher 16-bit should also be + * updated to indicate changing divider bits. * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider to be rounded * to the closest integer instead of the up one. */ @@ -359,9 +359,9 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, * CLK_MUX_INDEX_ONE - register index starts at 1, not 0 * CLK_MUX_INDEX_BIT - register index is a single bit (power of two) * CLK_MUX_HIWORD_MASK - The mux settings are only in lower 16-bit of this - * register, and mask of mux bits are in higher 16-bit of this register. - * While setting the mux bits, higher 16-bit should also be updated to - * indicate changing mux bits. + * register, and mask of mux bits are in higher 16-bit of this register. + * While setting the mux bits, higher 16-bit should also be updated to + * indicate changing mux bits. */ struct clk_mux { struct clk_hw hw; -- cgit v1.2.3 From 54e73016dd217be915ed83353d296f2a133d1ad5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 22 Apr 2014 15:11:42 +0200 Subject: clk: Improve clk_ops documentation General: - Add parameter names to .round_rate() and .set_rate(). Documentation/clk.txt: - Add missing parameter for .set_rate(), - Add missing .debug_init(). include/linux/clk-provider.h: - Add parent rate documentation for .round_rate(), - Reorder documentation to match implementation order, - Add missing documentation for .init(). 
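For reference, a minimal provider-side sketch (a made-up divide-by-two clock, not part of this series) showing the callback signatures with the parameter names documented here:

  #include <linux/clk-provider.h>

  static unsigned long div2_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
  {
          return parent_rate / 2;
  }

  static long div2_round_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *parent_rate)
  {
          /* the parent rate is an input/output parameter */
          return *parent_rate / 2;
  }

  static int div2_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
  {
          /* fixed divider: nothing to program, rate follows the parent */
          return 0;
  }

  static const struct clk_ops div2_clk_ops = {
          .recalc_rate = div2_recalc_rate,
          .round_rate  = div2_round_rate,
          .set_rate    = div2_set_rate,
  };
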
Signed-off-by: Geert Uytterhoeven Signed-off-by: Mike Turquette --- Documentation/clk.txt | 16 +++++++++++----- include/linux/clk-provider.h | 44 +++++++++++++++++++++++++------------------- 2 files changed, 36 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/Documentation/clk.txt b/Documentation/clk.txt index c9c399af7c08..1fee72f4d331 100644 --- a/Documentation/clk.txt +++ b/Documentation/clk.txt @@ -68,21 +68,27 @@ the operations defined in clk.h: int (*is_enabled)(struct clk_hw *hw); unsigned long (*recalc_rate)(struct clk_hw *hw, unsigned long parent_rate); - long (*round_rate)(struct clk_hw *hw, unsigned long, - unsigned long *); + long (*round_rate)(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate); long (*determine_rate)(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk **best_parent_clk); int (*set_parent)(struct clk_hw *hw, u8 index); u8 (*get_parent)(struct clk_hw *hw); - int (*set_rate)(struct clk_hw *hw, unsigned long); + int (*set_rate)(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate); int (*set_rate_and_parent)(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, u8 index); + unsigned long parent_rate, + u8 index); unsigned long (*recalc_accuracy)(struct clk_hw *hw, - unsigned long parent_accuracy); + unsigned long parent_accuracy); void (*init)(struct clk_hw *hw); + int (*debug_init)(struct clk_hw *hw, + struct dentry *dentry); }; Part 3 - hardware clk implementations diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 397f98505bd4..40809431641e 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -81,12 +81,20 @@ struct dentry; * this op is not set then clock rate will be initialized to 0. * * @round_rate: Given a target rate as input, returns the closest rate actually - * supported by the clock. + * supported by the clock. The parent rate is an input/output + * parameter. * * @determine_rate: Given a target rate as input, returns the closest rate * actually supported by the clock, and optionally the parent clock * that should be used to provide the clock rate. * + * @set_parent: Change the input source of this clock; for clocks with multiple + * possible parents specify a new parent by passing in the index + * as a u8 corresponding to the parent in either the .parent_names + * or .parents arrays. This function in affect translates an + * array index into the value programmed into the hardware. + * Returns 0 on success, -EERROR otherwise. + * * @get_parent: Queries the hardware to determine the parent of a clock. The * return value is a u8 which specifies the index corresponding to * the parent clock. This index can be applied to either the @@ -97,26 +105,12 @@ struct dentry; * multiple parents. It is optional (and unnecessary) for clocks * with 0 or 1 parents. * - * @set_parent: Change the input source of this clock; for clocks with multiple - * possible parents specify a new parent by passing in the index - * as a u8 corresponding to the parent in either the .parent_names - * or .parents arrays. This function in affect translates an - * array index into the value programmed into the hardware. - * Returns 0 on success, -EERROR otherwise. - * * @set_rate: Change the rate of this clock. The requested rate is specified * by the second argument, which should typically be the return * of .round_rate call. The third argument gives the parent rate * which is likely helpful for most .set_rate implementation. 
* Returns 0 on success, -EERROR otherwise. * - * @recalc_accuracy: Recalculate the accuracy of this clock. The clock accuracy - * is expressed in ppb (parts per billion). The parent accuracy is - * an input parameter. - * Returns the calculated accuracy. Optional - if this op is not - * set then clock accuracy will be initialized to parent accuracy - * or 0 (perfect clock) if clock has no parent. - * * @set_rate_and_parent: Change the rate and the parent of this clock. The * requested rate is specified by the second argument, which * should typically be the return of .round_rate call. The @@ -128,6 +122,18 @@ struct dentry; * separately via calls to .set_parent and .set_rate. * Returns 0 on success, -EERROR otherwise. * + * @recalc_accuracy: Recalculate the accuracy of this clock. The clock accuracy + * is expressed in ppb (parts per billion). The parent accuracy is + * an input parameter. + * Returns the calculated accuracy. Optional - if this op is not + * set then clock accuracy will be initialized to parent accuracy + * or 0 (perfect clock) if clock has no parent. + * + * @init: Perform platform-specific initialization magic. + * This is not not used by any of the basic clock types. + * Please consider other ways of solving initialization problems + * before using this callback, as its use is discouraged. + * * @debug_init: Set up type-specific debugfs entries for this clock. This * is called once, after the debugfs directory entry for this * clock has been created. The dentry pointer representing that @@ -157,15 +163,15 @@ struct clk_ops { void (*disable_unused)(struct clk_hw *hw); unsigned long (*recalc_rate)(struct clk_hw *hw, unsigned long parent_rate); - long (*round_rate)(struct clk_hw *hw, unsigned long, - unsigned long *); + long (*round_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate); long (*determine_rate)(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk **best_parent_clk); int (*set_parent)(struct clk_hw *hw, u8 index); u8 (*get_parent)(struct clk_hw *hw); - int (*set_rate)(struct clk_hw *hw, unsigned long, - unsigned long); + int (*set_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); int (*set_rate_and_parent)(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate, u8 index); -- cgit v1.2.3 From 7b42a997bfb93c6ae0709f34ec8e2860757804b5 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Fri, 18 Apr 2014 08:05:50 +0900 Subject: clk: shmobile: r8a7779: Add clocks support The R8A7779 SoC has several clocks that are too custom to be supported in a generic driver. Those clocks are all fixed rate clocks with multiplier and divisor set according to boot mode configuration. Based on work for R-Car Gen2 SoCs by Laurent Pinchart. 
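Condensed illustration of the scheme (the helper name is made up; the values mirror the "z" clock entries of the table in the driver below): the MD boot mode pins index a multiplier/divisor table and each core clock is then registered as a plain fixed-factor clock:

  #include <linux/clk-provider.h>

  /* mult/div of the "z" clock for the four MD2:MD1 combinations */
  static const struct { unsigned int mult, div; } z_ratio[4] = {
          { 1, 2 }, { 2, 3 }, { 1, 2 }, { 2, 3 },
  };

  static struct clk *register_z_clock(u32 mode)
  {
          unsigned int idx = (mode >> 1) & 3;     /* MD2:MD1 */

          return clk_register_fixed_factor(NULL, "z", "plla", 0,
                                           z_ratio[idx].mult,
                                           z_ratio[idx].div);
  }
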
Cc: devicetree@vger.kernel.org Acked-by: Laurent Pinchart Signed-off-by: Simon Horman Signed-off-by: Mike Turquette --- .../bindings/clock/renesas,r8a7779-cpg-clocks.txt | 27 ++++ drivers/clk/shmobile/Makefile | 1 + drivers/clk/shmobile/clk-r8a7779.c | 180 +++++++++++++++++++++ include/linux/clk/shmobile.h | 3 + 4 files changed, 211 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt create mode 100644 drivers/clk/shmobile/clk-r8a7779.c (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt new file mode 100644 index 000000000000..ed3c8cb12f4e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt @@ -0,0 +1,27 @@ +* Renesas R8A7779 Clock Pulse Generator (CPG) + +The CPG generates core clocks for the R8A7779. It includes one PLL and +several fixed ratio dividers + +Required Properties: + + - compatible: Must be "renesas,r8a7779-cpg-clocks" + - reg: Base address and length of the memory resource used by the CPG + + - clocks: Reference to the parent clock + - #clock-cells: Must be 1 + - clock-output-names: The names of the clocks. Supported clocks are "plla", + "z", "zs", "s", "s1", "p", "b", "out". + + +Example +------- + + cpg_clocks: cpg_clocks@ffc80000 { + compatible = "renesas,r8a7779-cpg-clocks"; + reg = <0 0xffc80000 0 0x30>; + clocks = <&extal_clk>; + #clock-cells = <1>; + clock-output-names = "plla", "z", "zs", "s", "s1", "p", + "b", "out"; + }; diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile index 5404cb931ebf..bdf342daefa5 100644 --- a/drivers/clk/shmobile/Makefile +++ b/drivers/clk/shmobile/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o +obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/shmobile/clk-r8a7779.c new file mode 100644 index 000000000000..652ecacb6daf --- /dev/null +++ b/drivers/clk/shmobile/clk-r8a7779.c @@ -0,0 +1,180 @@ +/* + * r8a7779 Core CPG Clocks + * + * Copyright (C) 2013, 2014 Horms Solutions Ltd. + * + * Contact: Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CPG_NUM_CLOCKS (R8A7779_CLK_OUT + 1) + +struct r8a7779_cpg { + struct clk_onecell_data data; + spinlock_t lock; + void __iomem *reg; +}; + +/* ----------------------------------------------------------------------------- + * CPG Clock Data + */ + +/* + * MD1 = 1 MD1 = 0 + * (PLLA = 1500) (PLLA = 1600) + * (MHz) (MHz) + *------------------------------------------------+-------------------- + * clkz 1000 (2/3) 800 (1/2) + * clkzs 250 (1/6) 200 (1/8) + * clki 750 (1/2) 800 (1/2) + * clks 250 (1/6) 200 (1/8) + * clks1 125 (1/12) 100 (1/16) + * clks3 187.5 (1/8) 200 (1/8) + * clks4 93.7 (1/16) 100 (1/16) + * clkp 62.5 (1/24) 50 (1/32) + * clkg 62.5 (1/24) 66.6 (1/24) + * clkb, CLKOUT + * (MD2 = 0) 62.5 (1/24) 66.6 (1/24) + * (MD2 = 1) 41.6 (1/36) 50 (1/32) + */ + +#define CPG_CLK_CONFIG_INDEX(md) (((md) & (BIT(2)|BIT(1))) >> 1) + +struct cpg_clk_config { + unsigned int z_mult; + unsigned int z_div; + unsigned int zs_and_s_div; + unsigned int s1_div; + unsigned int p_div; + unsigned int b_and_out_div; +}; + +static const struct cpg_clk_config cpg_clk_configs[4] __initconst = { + { 1, 2, 8, 16, 32, 24 }, + { 2, 3, 6, 12, 24, 24 }, + { 1, 2, 8, 16, 32, 32 }, + { 2, 3, 6, 12, 24, 36 }, +}; + +/* + * MD PLLA Ratio + * 12 11 + *------------------------ + * 0 0 x42 + * 0 1 x48 + * 1 0 x56 + * 1 1 x64 + */ + +#define CPG_PLLA_MULT_INDEX(md) (((md) & (BIT(12)|BIT(11))) >> 11) + +static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 }; + +/* ----------------------------------------------------------------------------- + * Initialization + */ + +static u32 cpg_mode __initdata; + +static struct clk * __init +r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg, + const struct cpg_clk_config *config, + unsigned int plla_mult, const char *name) +{ + const char *parent_name = "plla"; + unsigned int mult = 1; + unsigned int div = 1; + + if (!strcmp(name, "plla")) { + parent_name = of_clk_get_parent_name(np, 0); + mult = plla_mult; + } else if (!strcmp(name, "z")) { + div = config->z_div; + mult = config->z_mult; + } else if (!strcmp(name, "zs") || !strcmp(name, "s")) { + div = config->zs_and_s_div; + } else if (!strcmp(name, "s1")) { + div = config->s1_div; + } else if (!strcmp(name, "p")) { + div = config->p_div; + } else if (!strcmp(name, "b") || !strcmp(name, "out")) { + div = config->b_and_out_div; + } else { + return ERR_PTR(-EINVAL); + } + + return clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div); +} + +static void __init r8a7779_cpg_clocks_init(struct device_node *np) +{ + const struct cpg_clk_config *config; + struct r8a7779_cpg *cpg; + struct clk **clks; + unsigned int i, plla_mult; + int num_clks; + + num_clks = of_property_count_strings(np, "clock-output-names"); + if (num_clks < 0) { + pr_err("%s: failed to count clocks\n", __func__); + return; + } + + cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); + clks = kzalloc(CPG_NUM_CLOCKS * sizeof(*clks), GFP_KERNEL); + if (cpg == NULL || clks == NULL) { + /* We're leaking memory on purpose, there's no point in cleaning + * up as the system won't boot anyway. 
+ */ + return; + } + + spin_lock_init(&cpg->lock); + + cpg->data.clks = clks; + cpg->data.clk_num = num_clks; + + config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(cpg_mode)]; + plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(cpg_mode)]; + + for (i = 0; i < num_clks; ++i) { + const char *name; + struct clk *clk; + + of_property_read_string_index(np, "clock-output-names", i, + &name); + + clk = r8a7779_cpg_register_clock(np, cpg, config, + plla_mult, name); + if (IS_ERR(clk)) + pr_err("%s: failed to register %s %s clock (%ld)\n", + __func__, np->name, name, PTR_ERR(clk)); + else + cpg->data.clks[i] = clk; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); +} +CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks", + r8a7779_cpg_clocks_init); + +void __init r8a7779_clocks_init(u32 mode) +{ + cpg_mode = mode; + + of_clk_init(NULL); +} diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h index f9bf080a1123..9f8a14041dd5 100644 --- a/include/linux/clk/shmobile.h +++ b/include/linux/clk/shmobile.h @@ -1,7 +1,9 @@ /* * Copyright 2013 Ideas On Board SPRL + * Copyright 2013, 2014 Horms Solutions Ltd. * * Contact: Laurent Pinchart + * Contact: Simon Horman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,6 +16,7 @@ #include +void r8a7779_clocks_init(u32 mode); void rcar_gen2_clocks_init(u32 mode); #endif -- cgit v1.2.3 From 59025887fb08a8b913605fb20f8a62eb0bb69b36 Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Tue, 13 May 2014 15:30:16 +0530 Subject: phy: Add new Exynos5 USB 3.0 PHY driver Add a new driver for the USB 3.0 PHY on Exynos5 series of SoCs. The new driver uses the generic PHY framework and will interact with DWC3 controller present on Exynos5 series of SoCs. Also, created a new header file in linux/mfd/syscon/ for Exynos5 SoCs and put the required PMU offset definitions for the basic available PHYs. Signed-off-by: Vivek Gautam Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/Kconfig | 11 + drivers/phy/Makefile | 1 + drivers/phy/phy-exynos5-usbdrd.c | 644 +++++++++++++++++++++++++++++++++ include/linux/mfd/syscon/exynos5-pmu.h | 44 +++ 4 files changed, 700 insertions(+) create mode 100644 drivers/phy/phy-exynos5-usbdrd.c create mode 100644 include/linux/mfd/syscon/exynos5-pmu.h (limited to 'include/linux') diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 071b7633bf03..16a2f067c242 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -160,6 +160,17 @@ config PHY_EXYNOS5250_USB2 particular SoC is compiled in the driver. In case of Exynos 5250 four phys are available - device, host, HSIC0 and HSIC. +config PHY_EXYNOS5_USBDRD + tristate "Exynos5 SoC series USB DRD PHY driver" + depends on ARCH_EXYNOS5 && OF + depends on HAS_IOMEM + select GENERIC_PHY + select MFD_SYSCON + help + Enable USB DRD PHY support for Exynos 5 SoC series. + This driver provides PHY interface for USB 3.0 DRD controller + present on Exynos5 SoC series. 
+ config PHY_XGENE tristate "APM X-Gene 15Gbps PHY support" depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST) diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 7728518572a4..b4f1d5770601 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -18,4 +18,5 @@ phy-exynos-usb2-y += phy-samsung-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o +obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c new file mode 100644 index 000000000000..8fcdd9434346 --- /dev/null +++ b/drivers/phy/phy-exynos5-usbdrd.c @@ -0,0 +1,644 @@ +/* + * Samsung EXYNOS5 SoC series USB DRD PHY driver + * + * Phy provider for USB 3.0 DRD controller on Exynos5 SoC series + * + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * Author: Vivek Gautam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Exynos USB PHY registers */ +#define EXYNOS5_FSEL_9MHZ6 0x0 +#define EXYNOS5_FSEL_10MHZ 0x1 +#define EXYNOS5_FSEL_12MHZ 0x2 +#define EXYNOS5_FSEL_19MHZ2 0x3 +#define EXYNOS5_FSEL_20MHZ 0x4 +#define EXYNOS5_FSEL_24MHZ 0x5 +#define EXYNOS5_FSEL_50MHZ 0x7 + +/* EXYNOS5: USB 3.0 DRD PHY registers */ +#define EXYNOS5_DRD_LINKSYSTEM 0x04 + +#define LINKSYSTEM_FLADJ_MASK (0x3f << 1) +#define LINKSYSTEM_FLADJ(_x) ((_x) << 1) +#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27) + +#define EXYNOS5_DRD_PHYUTMI 0x08 + +#define PHYUTMI_OTGDISABLE BIT(6) +#define PHYUTMI_FORCESUSPEND BIT(1) +#define PHYUTMI_FORCESLEEP BIT(0) + +#define EXYNOS5_DRD_PHYPIPE 0x0c + +#define EXYNOS5_DRD_PHYCLKRST 0x10 + +#define PHYCLKRST_EN_UTMISUSPEND BIT(31) + +#define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23) +#define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23) + +#define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21) +#define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21) + +#define PHYCLKRST_SSC_EN BIT(20) +#define PHYCLKRST_REF_SSP_EN BIT(19) +#define PHYCLKRST_REF_CLKDIV2 BIT(18) + +#define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11) +#define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11) +#define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11) +#define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11) +#define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11) +#define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11) + +#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5) +#define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8) +#define PHYCLKRST_FSEL(_x) ((_x) << 5) +#define PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5) +#define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5) +#define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5) +#define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5) + +#define PHYCLKRST_RETENABLEN BIT(4) + +#define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2) +#define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2) +#define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2) + +#define PHYCLKRST_PORTRESET BIT(1) +#define PHYCLKRST_COMMONONN BIT(0) + +#define EXYNOS5_DRD_PHYREG0 0x14 +#define EXYNOS5_DRD_PHYREG1 0x18 + +#define EXYNOS5_DRD_PHYPARAM0 0x1c + +#define PHYPARAM0_REF_USE_PAD BIT(31) +#define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26) 
+#define PHYPARAM0_REF_LOSLEVEL (0x9 << 26) + +#define EXYNOS5_DRD_PHYPARAM1 0x20 + +#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0) +#define PHYPARAM1_PCS_TXDEEMPH (0x1c) + +#define EXYNOS5_DRD_PHYTERM 0x24 + +#define EXYNOS5_DRD_PHYTEST 0x28 + +#define PHYTEST_POWERDOWN_SSP BIT(3) +#define PHYTEST_POWERDOWN_HSP BIT(2) + +#define EXYNOS5_DRD_PHYADP 0x2c + +#define EXYNOS5_DRD_PHYUTMICLKSEL 0x30 + +#define PHYUTMICLKSEL_UTMI_CLKSEL BIT(2) + +#define EXYNOS5_DRD_PHYRESUME 0x34 +#define EXYNOS5_DRD_LINKPORT 0x44 + +#define KHZ 1000 +#define MHZ (KHZ * KHZ) + +enum exynos5_usbdrd_phy_id { + EXYNOS5_DRDPHY_UTMI, + EXYNOS5_DRDPHY_PIPE3, + EXYNOS5_DRDPHYS_NUM, +}; + +struct phy_usb_instance; +struct exynos5_usbdrd_phy; + +struct exynos5_usbdrd_phy_config { + u32 id; + void (*phy_isol)(struct phy_usb_instance *inst, u32 on); + void (*phy_init)(struct exynos5_usbdrd_phy *phy_drd); + unsigned int (*set_refclk)(struct phy_usb_instance *inst); +}; + +struct exynos5_usbdrd_phy_drvdata { + const struct exynos5_usbdrd_phy_config *phy_cfg; + u32 pmu_offset_usbdrd0_phy; + u32 pmu_offset_usbdrd1_phy; +}; + +/** + * struct exynos5_usbdrd_phy - driver data for USB 3.0 PHY + * @dev: pointer to device instance of this platform device + * @reg_phy: usb phy controller register memory base + * @clk: phy clock for register access + * @drv_data: pointer to SoC level driver data structure + * @phys[]: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY + * instances each with its 'phy' and 'phy_cfg'. + * @extrefclk: frequency select settings when using 'separate + * reference clocks' for SS and HS operations + * @ref_clk: reference clock to PHY block from which PHY's + * operational clocks are derived + * @ref_rate: rate of above reference clock + */ +struct exynos5_usbdrd_phy { + struct device *dev; + void __iomem *reg_phy; + struct clk *clk; + const struct exynos5_usbdrd_phy_drvdata *drv_data; + struct phy_usb_instance { + struct phy *phy; + u32 index; + struct regmap *reg_pmu; + u32 pmu_offset; + const struct exynos5_usbdrd_phy_config *phy_cfg; + } phys[EXYNOS5_DRDPHYS_NUM]; + u32 extrefclk; + struct clk *ref_clk; +}; + +static inline +struct exynos5_usbdrd_phy *to_usbdrd_phy(struct phy_usb_instance *inst) +{ + return container_of((inst), struct exynos5_usbdrd_phy, + phys[(inst)->index]); +} + +/* + * exynos5_rate_to_clk() converts the supplied clock rate to the value that + * can be written to the phy register. + */ +static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg) +{ + /* EXYNOS5_FSEL_MASK */ + + switch (rate) { + case 9600 * KHZ: + *reg = EXYNOS5_FSEL_9MHZ6; + break; + case 10 * MHZ: + *reg = EXYNOS5_FSEL_10MHZ; + break; + case 12 * MHZ: + *reg = EXYNOS5_FSEL_12MHZ; + break; + case 19200 * KHZ: + *reg = EXYNOS5_FSEL_19MHZ2; + break; + case 20 * MHZ: + *reg = EXYNOS5_FSEL_20MHZ; + break; + case 24 * MHZ: + *reg = EXYNOS5_FSEL_24MHZ; + break; + case 50 * MHZ: + *reg = EXYNOS5_FSEL_50MHZ; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst, + unsigned int on) +{ + unsigned int val; + + if (!inst->reg_pmu) + return; + + val = on ? 0 : EXYNOS5_PHY_ENABLE; + + regmap_update_bits(inst->reg_pmu, inst->pmu_offset, + EXYNOS5_PHY_ENABLE, val); +} + +/* + * Sets the pipe3 phy's clk as EXTREFCLK (XXTI) which is internal clock + * from clock core. Further sets multiplier values and spread spectrum + * clock settings for SuperSpeed operations. 
+ */ +static unsigned int +exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst) +{ + static u32 reg; + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + /* restore any previous reference clock settings */ + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + + /* Use EXTREFCLK as ref clock */ + reg &= ~PHYCLKRST_REFCLKSEL_MASK; + reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; + + /* FSEL settings corresponding to reference clock */ + reg &= ~PHYCLKRST_FSEL_PIPE_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK; + switch (phy_drd->extrefclk) { + case EXYNOS5_FSEL_50MHZ: + reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF | + PHYCLKRST_SSC_REFCLKSEL(0x00)); + break; + case EXYNOS5_FSEL_24MHZ: + reg |= (PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF | + PHYCLKRST_SSC_REFCLKSEL(0x88)); + break; + case EXYNOS5_FSEL_20MHZ: + reg |= (PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF | + PHYCLKRST_SSC_REFCLKSEL(0x00)); + break; + case EXYNOS5_FSEL_19MHZ2: + reg |= (PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF | + PHYCLKRST_SSC_REFCLKSEL(0x88)); + break; + default: + dev_dbg(phy_drd->dev, "unsupported ref clk\n"); + break; + } + + return reg; +} + +/* + * Sets the utmi phy's clk as EXTREFCLK (XXTI) which is internal clock + * from clock core. Further sets the FSEL values for HighSpeed operations. + */ +static unsigned int +exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst) +{ + static u32 reg; + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + /* restore any previous reference clock settings */ + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + + reg &= ~PHYCLKRST_REFCLKSEL_MASK; + reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; + + reg &= ~PHYCLKRST_FSEL_UTMI_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK; + reg |= PHYCLKRST_FSEL(phy_drd->extrefclk); + + return reg; +} + +static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) +{ + u32 reg; + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1); + /* Set Tx De-Emphasis level */ + reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK; + reg |= PHYPARAM1_PCS_TXDEEMPH; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1); + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); + reg &= ~PHYTEST_POWERDOWN_SSP; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); +} + +static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) +{ + u32 reg; + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0); + /* Set Loss-of-Signal Detector sensitivity */ + reg &= ~PHYPARAM0_REF_LOSLEVEL_MASK; + reg |= PHYPARAM0_REF_LOSLEVEL; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0); + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1); + /* Set Tx De-Emphasis level */ + reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK; + reg |= PHYPARAM1_PCS_TXDEEMPH; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1); + + /* UTMI Power Control */ + writel(PHYUTMI_OTGDISABLE, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI); + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); + reg &= ~PHYTEST_POWERDOWN_HSP; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); +} + +static int exynos5_usbdrd_phy_init(struct phy *phy) +{ + int ret; + u32 reg; + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + ret = clk_prepare_enable(phy_drd->clk); + if (ret) + return ret; + + /* Reset USB 3.0 PHY */ + writel(0x0, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0); + writel(0x0, phy_drd->reg_phy + EXYNOS5_DRD_PHYRESUME); + + /* + * Setting the Frame length Adj value[6:1] 
to default 0x20 + * See xHCI 1.0 spec, 5.2.4 + */ + reg = LINKSYSTEM_XHCI_VERSION_CONTROL | + LINKSYSTEM_FLADJ(0x20); + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM); + + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0); + /* Select PHY CLK source */ + reg &= ~PHYPARAM0_REF_USE_PAD; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0); + + /* This bit must be set for both HS and SS operations */ + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMICLKSEL); + reg |= PHYUTMICLKSEL_UTMI_CLKSEL; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMICLKSEL); + + /* UTMI or PIPE3 specific init */ + inst->phy_cfg->phy_init(phy_drd); + + /* reference clock settings */ + reg = inst->phy_cfg->set_refclk(inst); + + /* Digital power supply in normal operating mode */ + reg |= PHYCLKRST_RETENABLEN | + /* Enable ref clock for SS function */ + PHYCLKRST_REF_SSP_EN | + /* Enable spread spectrum */ + PHYCLKRST_SSC_EN | + /* Power down HS Bias and PLL blocks in suspend mode */ + PHYCLKRST_COMMONONN | + /* Reset the port */ + PHYCLKRST_PORTRESET; + + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + + udelay(10); + + reg &= ~PHYCLKRST_PORTRESET; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + + clk_disable_unprepare(phy_drd->clk); + + return 0; +} + +static int exynos5_usbdrd_phy_exit(struct phy *phy) +{ + int ret; + u32 reg; + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + ret = clk_prepare_enable(phy_drd->clk); + if (ret) + return ret; + + reg = PHYUTMI_OTGDISABLE | + PHYUTMI_FORCESUSPEND | + PHYUTMI_FORCESLEEP; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI); + + /* Resetting the PHYCLKRST enable bits to reduce leakage current */ + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + reg &= ~(PHYCLKRST_REF_SSP_EN | + PHYCLKRST_SSC_EN | + PHYCLKRST_COMMONONN); + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); + + /* Control PHYTEST to remove leakage current */ + reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); + reg |= PHYTEST_POWERDOWN_SSP | + PHYTEST_POWERDOWN_HSP; + writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); + + clk_disable_unprepare(phy_drd->clk); + + return 0; +} + +static int exynos5_usbdrd_phy_power_on(struct phy *phy) +{ + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n"); + + clk_prepare_enable(phy_drd->ref_clk); + + /* Power-on PHY*/ + inst->phy_cfg->phy_isol(inst, 0); + + return 0; +} + +static int exynos5_usbdrd_phy_power_off(struct phy *phy) +{ + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + + dev_dbg(phy_drd->dev, "Request to power_off usbdrd_phy phy\n"); + + /* Power-off the PHY */ + inst->phy_cfg->phy_isol(inst, 1); + + clk_disable_unprepare(phy_drd->ref_clk); + + return 0; +} + +static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); + + if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) + return ERR_PTR(-ENODEV); + + return phy_drd->phys[args->args[0]].phy; +} + +static struct phy_ops exynos5_usbdrd_phy_ops = { + .init = exynos5_usbdrd_phy_init, + .exit = exynos5_usbdrd_phy_exit, + .power_on = exynos5_usbdrd_phy_power_on, + .power_off = exynos5_usbdrd_phy_power_off, + .owner = THIS_MODULE, +}; + +const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = { + { 
+ .id = EXYNOS5_DRDPHY_UTMI, + .phy_isol = exynos5_usbdrd_phy_isol, + .phy_init = exynos5_usbdrd_utmi_init, + .set_refclk = exynos5_usbdrd_utmi_set_refclk, + }, + { + .id = EXYNOS5_DRDPHY_PIPE3, + .phy_isol = exynos5_usbdrd_phy_isol, + .phy_init = exynos5_usbdrd_pipe3_init, + .set_refclk = exynos5_usbdrd_pipe3_set_refclk, + }, +}; + +const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = { + .phy_cfg = phy_cfg_exynos5, + .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, + .pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL, +}; + +const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = { + .phy_cfg = phy_cfg_exynos5, + .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, +}; + +static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { + { + .compatible = "samsung,exynos5250-usbdrd-phy", + .data = &exynos5250_usbdrd_phy + }, { + .compatible = "samsung,exynos5420-usbdrd-phy", + .data = &exynos5420_usbdrd_phy + }, + { }, +}; + +static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct exynos5_usbdrd_phy *phy_drd; + struct phy_provider *phy_provider; + struct resource *res; + const struct of_device_id *match; + const struct exynos5_usbdrd_phy_drvdata *drv_data; + struct regmap *reg_pmu; + u32 pmu_offset; + unsigned long ref_rate; + int i, ret; + int channel; + + phy_drd = devm_kzalloc(dev, sizeof(*phy_drd), GFP_KERNEL); + if (!phy_drd) + return -ENOMEM; + + dev_set_drvdata(dev, phy_drd); + phy_drd->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + phy_drd->reg_phy = devm_ioremap_resource(dev, res); + if (IS_ERR(phy_drd->reg_phy)) + return PTR_ERR(phy_drd->reg_phy); + + match = of_match_node(exynos5_usbdrd_phy_of_match, pdev->dev.of_node); + + drv_data = match->data; + phy_drd->drv_data = drv_data; + + phy_drd->clk = devm_clk_get(dev, "phy"); + if (IS_ERR(phy_drd->clk)) { + dev_err(dev, "Failed to get clock of phy controller\n"); + return PTR_ERR(phy_drd->clk); + } + + phy_drd->ref_clk = devm_clk_get(dev, "ref"); + if (IS_ERR(phy_drd->ref_clk)) { + dev_err(dev, "Failed to get reference clock of usbdrd phy\n"); + return PTR_ERR(phy_drd->ref_clk); + } + ref_rate = clk_get_rate(phy_drd->ref_clk); + + ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk); + if (ret) { + dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n", + ref_rate); + return ret; + } + + reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,pmu-syscon"); + if (IS_ERR(reg_pmu)) { + dev_err(dev, "Failed to lookup PMU regmap\n"); + return PTR_ERR(reg_pmu); + } + + /* + * Exynos5420 SoC has multiple channels for USB 3.0 PHY, with + * each having separate power control registers. + * 'channel' facilitates to set such registers. 
+ */ + channel = of_alias_get_id(node, "usbdrdphy"); + if (channel < 0) + dev_dbg(dev, "Not a multi-controller usbdrd phy\n"); + + switch (channel) { + case 1: + pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd1_phy; + break; + case 0: + default: + pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd0_phy; + break; + } + + dev_vdbg(dev, "Creating usbdrd_phy phy\n"); + + for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) { + struct phy *phy = devm_phy_create(dev, &exynos5_usbdrd_phy_ops, + NULL); + if (IS_ERR(phy)) { + dev_err(dev, "Failed to create usbdrd_phy phy\n"); + return PTR_ERR(phy); + } + + phy_drd->phys[i].phy = phy; + phy_drd->phys[i].index = i; + phy_drd->phys[i].reg_pmu = reg_pmu; + phy_drd->phys[i].pmu_offset = pmu_offset; + phy_drd->phys[i].phy_cfg = &drv_data->phy_cfg[i]; + phy_set_drvdata(phy, &phy_drd->phys[i]); + } + + phy_provider = devm_of_phy_provider_register(dev, + exynos5_usbdrd_phy_xlate); + if (IS_ERR(phy_provider)) { + dev_err(phy_drd->dev, "Failed to register phy provider\n"); + return PTR_ERR(phy_provider); + } + + return 0; +} + +static struct platform_driver exynos5_usb3drd_phy = { + .probe = exynos5_usbdrd_phy_probe, + .driver = { + .of_match_table = exynos5_usbdrd_phy_of_match, + .name = "exynos5_usb3drd_phy", + .owner = THIS_MODULE, + } +}; + +module_platform_driver(exynos5_usb3drd_phy); +MODULE_DESCRIPTION("Samsung EXYNOS5 SoCs USB 3.0 DRD controller PHY driver"); +MODULE_AUTHOR("Vivek Gautam "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:exynos5_usb3drd_phy"); diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h new file mode 100644 index 000000000000..00ef24bf6ede --- /dev/null +++ b/include/linux/mfd/syscon/exynos5-pmu.h @@ -0,0 +1,44 @@ +/* + * Exynos5 SoC series Power Management Unit (PMU) register offsets + * and bit definitions. + * + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ +#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ + +/* Exynos5 PMU register definitions */ +#define EXYNOS5_HDMI_PHY_CONTROL (0x700) +#define EXYNOS5_USBDRD_PHY_CONTROL (0x704) + +/* Exynos5250 specific register definitions */ +#define EXYNOS5_USBHOST_PHY_CONTROL (0x708) +#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c) +#define EXYNOS5_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5_ADC_PHY_CONTROL (0x718) +#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c) +#define EXYNOS5_DPTX_PHY_CONTROL (0x720) +#define EXYNOS5_SATA_PHY_CONTROL (0x724) + +/* Exynos5420 specific register definitions */ +#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708) +#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c) +#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714) +#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718) +#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c) +#define EXYNOS5420_ADC_PHY_CONTROL (0x720) +#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724) +#define EXYNOS5420_DPTX_PHY_CONTROL (0x728) + +#define EXYNOS5_PHY_ENABLE BIT(0) + +#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1) +#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2) + +#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */ -- cgit v1.2.3 From ad0dc7f94dbf417b1c7d42e1f0b250f045b27f8f Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 19 Feb 2014 10:51:42 -0800 Subject: rcutorture: Add forward-progress checking for writer The rcutorture output currently does not distinguish between stalls in the RCU implementation and stalls in the rcu_torture_writer() kthreads. This commit therefore adds some diagnostics to help distinguish between these two conditions, at least for the non-SRCU implementations. (SRCU does not provide evidence of update-side forward progress by design.) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 19 +++++++++++++++++++ kernel/rcu/rcutorture.c | 37 +++++++++++++++++++++++++++++++++++++ kernel/rcu/tree.c | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 00a7fd61b3c6..82973738125b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -51,7 +51,17 @@ extern int rcu_expedited; /* for sysctl */ extern int rcutorture_runnable; /* for sysctl */ #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ +enum rcutorture_type { + RCU_FLAVOR, + RCU_BH_FLAVOR, + RCU_SCHED_FLAVOR, + SRCU_FLAVOR, + INVALID_RCU_FLAVOR +}; + #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, + unsigned long *gpnum, unsigned long *completed); void rcutorture_record_test_transition(void); void rcutorture_record_progress(unsigned long vernum); void do_trace_rcu_torture_read(const char *rcutorturename, @@ -60,6 +70,15 @@ void do_trace_rcu_torture_read(const char *rcutorturename, unsigned long c_old, unsigned long c); #else +static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, + int *flags, + unsigned long *gpnum, + unsigned long *completed) +{ + *flags = 0; + *gpnum = 0; + *completed = 0; +} static inline void rcutorture_record_test_transition(void) { } diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bd30bc61bc05..0d739e3797e3 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -138,6 +138,15 @@ static long n_barrier_attempts; static long n_barrier_successes; static struct list_head rcu_torture_removed; +static int rcu_torture_writer_state; +#define RTWS_FIXED_DELAY 0 +#define RTWS_DELAY 1 +#define RTWS_REPLACE 2 +#define RTWS_DEF_FREE 3 +#define RTWS_EXP_SYNC 4 +#define RTWS_STUTTER 5 +#define RTWS_STOPPING 6 + #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) #define RCUTORTURE_RUNNABLE_INIT 1 #else @@ -214,6 +223,7 @@ rcu_torture_free(struct rcu_torture *p) */ struct rcu_torture_ops { + int ttype; void (*init)(void); int (*readlock)(void); void (*read_delay)(struct torture_random_state *rrsp); @@ -312,6 +322,7 @@ static void rcu_sync_torture_init(void) } static struct rcu_torture_ops rcu_ops = { + .ttype = RCU_FLAVOR, .init = rcu_sync_torture_init, .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, @@ -355,6 +366,7 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p) } static struct rcu_torture_ops rcu_bh_ops = { + .ttype = RCU_BH_FLAVOR, .init = rcu_sync_torture_init, .readlock = rcu_bh_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ @@ -397,6 +409,7 @@ call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) } static struct rcu_torture_ops rcu_busted_ops = { + .ttype = INVALID_RCU_FLAVOR, .init = rcu_sync_torture_init, .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ @@ -492,6 +505,7 @@ static void srcu_torture_synchronize_expedited(void) } static struct rcu_torture_ops srcu_ops = { + .ttype = SRCU_FLAVOR, .init = rcu_sync_torture_init, .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, @@ -527,6 +541,7 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p) } static struct rcu_torture_ops sched_ops = { + .ttype = RCU_SCHED_FLAVOR, .init = rcu_sync_torture_init, .readlock = sched_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ @@ -699,12 +714,15 @@ rcu_torture_writer(void *arg) set_user_nice(current, MAX_NICE); do { + rcu_torture_writer_state = RTWS_FIXED_DELAY; schedule_timeout_uninterruptible(1); rp = rcu_torture_alloc(); if (rp == NULL) continue; rp->rtort_pipe_count = 0; + rcu_torture_writer_state = RTWS_DELAY; udelay(torture_random(&rand) & 0x3ff); + rcu_torture_writer_state = RTWS_REPLACE; old_rp = rcu_dereference_check(rcu_torture_current, current == writer_task); rp->rtort_mbtest = 1; @@ -721,8 +739,10 @@ rcu_torture_writer(void *arg) else exp = gp_exp; if (!exp) { + rcu_torture_writer_state = RTWS_DEF_FREE; cur_ops->deferred_free(old_rp); } else { + rcu_torture_writer_state = RTWS_EXP_SYNC; cur_ops->exp_sync(); list_add(&old_rp->rtort_free, &rcu_torture_removed); @@ -743,8 +763,10 @@ rcu_torture_writer(void *arg) } } rcutorture_record_progress(++rcu_torture_current_version); + rcu_torture_writer_state = RTWS_STUTTER; stutter_wait("rcu_torture_writer"); } while (!torture_must_stop()); + rcu_torture_writer_state = RTWS_STOPPING; torture_kthread_stopping("rcu_torture_writer"); return 0; } @@ -937,6 +959,7 @@ rcu_torture_printk(char *page) int i; long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; + static unsigned long rtcv_snap = ULONG_MAX; for_each_possible_cpu(cpu) { for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { @@ -997,6 +1020,20 @@ rcu_torture_printk(char *page) page += sprintf(page, "\n"); if (cur_ops->stats) cur_ops->stats(page); + if (rtcv_snap == rcu_torture_current_version && + rcu_torture_current != NULL) { + int __maybe_unused flags; + unsigned long __maybe_unused gpnum; + unsigned long __maybe_unused completed; + + rcutorture_get_gp_data(cur_ops->ttype, + &flags, &gpnum, &completed); + page += sprintf(page, + "??? Writer stall state %d g%lu c%lu f%#x\n", + rcu_torture_writer_state, + gpnum, completed, flags); + } + rtcv_snap = rcu_torture_current_version; } /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0c47e300210a..3d15b5a82ae8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -293,6 +293,39 @@ void rcutorture_record_test_transition(void) } EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); +/* + * Send along grace-period-related data for rcutorture diagnostics. + */ +void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, + unsigned long *gpnum, unsigned long *completed) +{ + struct rcu_state *rsp = NULL; + + switch (test_type) { + case RCU_FLAVOR: + rsp = rcu_state; + break; + case RCU_BH_FLAVOR: + rsp = &rcu_bh_state; + break; + case RCU_SCHED_FLAVOR: + rsp = &rcu_sched_state; + break; + default: + break; + } + if (rsp != NULL) { + *flags = ACCESS_ONCE(rsp->gp_flags); + *gpnum = ACCESS_ONCE(rsp->gpnum); + *completed = ACCESS_ONCE(rsp->completed); + return; + } + *flags = 0; + *gpnum = 0; + *completed = 0; +} +EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); + /* * Record the number of writer passes through the current rcutorture test. 
* This is also used to correlate debugfs tracing stats with the rcutorture -- cgit v1.2.3 From d9c6866be8a145e32da616d8dcbae806032d75b5 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 7 May 2014 15:23:56 -0500 Subject: of: kill off of_can_translate_address of_can_translate_address only checks some conditions for address translation, but does not check other conditions like having range properties. The checks it does do are redundant with __of_address_translate. The only difference is printing a message or not. Since we only have a single caller that does the full translation anyway, just remove of_can_translate_address and quiet the error message. Cc: Grant Likely Signed-off-by: Rob Herring Tested-by: Frank Rowand Reviewed-by: Frank Rowand --- drivers/of/address.c | 22 +--------------------- drivers/of/platform.c | 5 ++--- include/linux/of_address.h | 1 - 3 files changed, 3 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/address.c b/drivers/of/address.c index cb4242a69cd5..95351b2a112c 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -498,8 +498,7 @@ static u64 __of_translate_address(struct device_node *dev, /* Count address cells & copy address locally */ bus->count_cells(dev, &na, &ns); if (!OF_CHECK_COUNTS(na, ns)) { - printk(KERN_ERR "prom_parse: Bad cell count for %s\n", - of_node_full_name(dev)); + pr_debug("OF: Bad cell count for %s\n", of_node_full_name(dev)); goto bail; } memcpy(addr, in_addr, na * 4); @@ -564,25 +563,6 @@ u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr) } EXPORT_SYMBOL(of_translate_dma_address); -bool of_can_translate_address(struct device_node *dev) -{ - struct device_node *parent; - struct of_bus *bus; - int na, ns; - - parent = of_get_parent(dev); - if (parent == NULL) - return false; - - bus = of_match_bus(parent); - bus->count_cells(dev, &na, &ns); - - of_node_put(parent); - - return OF_CHECK_COUNTS(na, ns); -} -EXPORT_SYMBOL(of_can_translate_address); - const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags) { diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 0602eb5b1be2..d0009b3614af 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -140,9 +140,8 @@ struct platform_device *of_device_alloc(struct device_node *np, return NULL; /* count the io and irq resources */ - if (of_can_translate_address(np)) - while (of_address_to_resource(np, num_reg, &temp_res) == 0) - num_reg++; + while (of_address_to_resource(np, num_reg, &temp_res) == 0) + num_reg++; num_irq = of_irq_count(np); /* Populate the resource table */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 5f6ed6b182b8..906ca7681756 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -40,7 +40,6 @@ extern u64 of_translate_dma_address(struct device_node *dev, #ifdef CONFIG_OF_ADDRESS extern u64 of_translate_address(struct device_node *np, const __be32 *addr); -extern bool of_can_translate_address(struct device_node *dev); extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); extern struct device_node *of_find_matching_node_by_address( -- cgit v1.2.3 From 0d2602ca30e410e84e8bdf05c84ed5688e0a5a44 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 13 May 2014 15:10:52 -0600 Subject: blk-mq: improve support for shared tags maps This adds support for active queue tracking, meaning that the blk-mq tagging maintains a count of active users of a tag set. 
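Purely as an illustration of the fairness rule described in the next paragraph, the per-queue cap that the new hctx_may_queue() helper (further down in this patch) derives from the shared tag depth and the active-queue count can be rewritten as a stand-alone user-space sketch. The helper and variable names below are made up for the example; only the rounded-up division and the minimum of four tags are taken from the patch itself.

	/*
	 * Sketch only: the per-queue tag cap computed by hctx_may_queue() in
	 * this patch, i.e. depth = max((bt->depth + users - 1) / users, 4U),
	 * reproduced as plain C so the arithmetic can be checked in isolation.
	 */
	#include <stdio.h>

	static unsigned int fair_share_depth(unsigned int total_depth,
					     unsigned int active_queues)
	{
		unsigned int depth;

		if (active_queues == 0)
			return total_depth;	/* no sharing detected: no throttling */

		/* Divide the shared depth evenly, rounding up, but keep at
		 * least a few tags per queue so nobody is starved outright. */
		depth = (total_depth + active_queues - 1) / active_queues;
		return depth < 4 ? 4 : depth;
	}

	int main(void)
	{
		/* e.g. a 64-tag set shared by 1, 2, 8 and 32 active queues */
		unsigned int users[] = { 1, 2, 8, 32 };

		for (unsigned int i = 0; i < sizeof(users) / sizeof(users[0]); i++)
			printf("users=%u -> per-queue cap=%u\n",
			       users[i], fair_share_depth(64, users[i]));
		return 0;
	}

With a 64-tag set this yields caps of 64, 32, 8 and 4 respectively, showing both the even split and the lower bound at work.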
This allows us to maintain a notion of fairness between users, so that we can distribute the tag depth evenly without starving some users while allowing others to try unfair deep queues. If sharing of a tag set is detected, each hardware queue will track the depth of its own queue. And if this exceeds the total depth divided by the number of active queues, the user is actively throttled down. The active queue count is done lazily to avoid bouncing that data between submitter and completer. Each hardware queue gets marked active when it allocates its first tag, and gets marked inactive when 1) the last tag is cleared, and 2) the queue timeout grace period has passed. Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 10 +++++ block/blk-mq-tag.c | 112 +++++++++++++++++++++++++++++++++++++++------- block/blk-mq-tag.h | 27 +++++++++-- block/blk-mq.c | 85 ++++++++++++++++++++++++++++++++--- block/blk-timeout.c | 13 +++++- block/blk.h | 4 ++ include/linux/blk-mq.h | 7 +++ include/linux/blk_types.h | 2 + include/linux/blkdev.h | 3 ++ 9 files changed, 236 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 8145b5b25b4b..99a60a829e69 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -208,6 +208,11 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) return blk_mq_tag_sysfs_show(hctx->tags, page); } +static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); +} + static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { unsigned int i, first = 1; @@ -267,6 +272,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_dispatched_show, }; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = { + .attr = {.name = "active", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_active_show, +}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, @@ -287,6 +296,7 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_pending.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, + &blk_mq_hw_sysfs_active.attr, NULL, }; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 8d526a3e02f6..c80086c9c064 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -7,13 +7,12 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, - bool reserved) +void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved) { int tag, zero = 0; - tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved); - blk_mq_put_tag(tags, tag, &zero); + tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved); + blk_mq_put_tag(hctx, tag, &zero); } static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) @@ -40,6 +39,84 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags) return bt_has_free_tags(&tags->bitmap_tags); } +static inline void bt_index_inc(unsigned int *index) +{ + *index = (*index + 1) & (BT_WAIT_QUEUES - 1); +} + +/* + * If a previously inactive queue goes active, bump the active user count. 
+ */ +bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + atomic_inc(&hctx->tags->active_queues); + + return true; +} + +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. + */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_bitmap_tags *bt; + int i, wake_index; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + /* + * Will only throttle depth on non-reserved tags + */ + bt = &tags->bitmap_tags; + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) + wake_up(&bs->wait); + + bt_index_inc(&wake_index); + } +} + +/* + * For shared tag users, we track the number of currently active users + * and attempt to provide a fair share of the tag depth for each of them. + */ +static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, + struct blk_mq_bitmap_tags *bt) +{ + unsigned int depth, users; + + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return true; + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + + /* + * Don't try dividing an ant + */ + if (bt->depth == 1) + return true; + + users = atomic_read(&hctx->tags->active_queues); + if (!users) + return true; + + /* + * Allow at least some tags + */ + depth = max((bt->depth + users - 1) / users, 4U); + return atomic_read(&hctx->nr_active) < depth; +} + static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) { int tag, org_last_tag, end; @@ -78,11 +155,15 @@ restart: * multiple users will tend to stick to different cachelines, at least * until the map is exhausted. 
*/ -static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache) +static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt, + unsigned int *tag_cache) { unsigned int last_tag, org_last_tag; int index, i, tag; + if (!hctx_may_queue(hctx, bt)) + return -1; + last_tag = org_last_tag = *tag_cache; index = TAG_TO_INDEX(bt, last_tag); @@ -117,11 +198,6 @@ done: return tag; } -static inline void bt_index_inc(unsigned int *index) -{ - *index = (*index + 1) & (BT_WAIT_QUEUES - 1); -} - static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx) { @@ -142,7 +218,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, DEFINE_WAIT(wait); int tag; - tag = __bt_get(bt, last_tag); + tag = __bt_get(hctx, bt, last_tag); if (tag != -1) return tag; @@ -156,7 +232,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, was_empty = list_empty(&wait.task_list); prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); - tag = __bt_get(bt, last_tag); + tag = __bt_get(hctx, bt, last_tag); if (tag != -1) break; @@ -200,14 +276,13 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, return tag; } -unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, - struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, +unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved) { if (!reserved) - return __blk_mq_get_tag(tags, hctx, last_tag, gfp); + return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp); - return __blk_mq_get_reserved_tag(tags, gfp); + return __blk_mq_get_reserved_tag(hctx->tags, gfp); } static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) @@ -265,9 +340,11 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, bt_clear_tag(&tags->breserved_tags, tag); } -void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, +void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag) { + struct blk_mq_tags *tags = hctx->tags; + if (tag >= tags->nr_reserved_tags) { const int real_tag = tag - tags->nr_reserved_tags; @@ -465,6 +542,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) res = bt_unused_tags(&tags->breserved_tags); page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); + page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues)); return page - orig_page; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 7aa9f0665489..0f5ec8b50ef3 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -38,6 +38,8 @@ struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; + atomic_t active_queues; + struct blk_mq_bitmap_tags bitmap_tags; struct blk_mq_bitmap_tags breserved_tags; @@ -49,9 +51,9 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, bool reserved); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, unsigned int *last_tag); +extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); +extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool 
reserved); +extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); @@ -68,4 +70,23 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, }; +extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); +extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); + +static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return false; + + return __blk_mq_tag_busy(hctx); +} + +static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return; + + __blk_mq_tag_idle(hctx); +} + #endif diff --git a/block/blk-mq.c b/block/blk-mq.c index 9f07a266f7ab..3c4f1fceef8e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -80,9 +80,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, struct request *rq; unsigned int tag; - tag = blk_mq_get_tag(hctx->tags, hctx, &ctx->last_tag, gfp, reserved); + tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; + + rq->cmd_flags = 0; + if (blk_mq_tag_busy(hctx)) { + rq->cmd_flags = REQ_MQ_INFLIGHT; + atomic_inc(&hctx->nr_active); + } + rq->tag = tag; return rq; } @@ -190,7 +197,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, /* csd/requeue_work/fifo_time is initialized before use */ rq->q = q; rq->mq_ctx = ctx; - rq->cmd_flags = rw_flags; + rq->cmd_flags |= rw_flags; rq->cmd_type = 0; /* do not touch atomic flags, it needs atomic ops against the timer */ rq->cpu = -1; @@ -262,7 +269,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_wait_for_tags(hctx->tags, hctx, reserved); + blk_mq_wait_for_tags(hctx, reserved); } while (1); return rq; @@ -303,8 +310,11 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; + if (rq->cmd_flags & REQ_MQ_INFLIGHT) + atomic_dec(&hctx->nr_active); + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); - blk_mq_put_tag(hctx->tags, tag, &ctx->last_tag); + blk_mq_put_tag(hctx, tag, &ctx->last_tag); blk_mq_queue_exit(q); } @@ -571,8 +581,13 @@ static void blk_mq_rq_timer(unsigned long data) queue_for_each_hw_ctx(q, hctx, i) blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); - if (next_set) - mod_timer(&q->timeout, round_jiffies_up(next)); + if (next_set) { + next = blk_rq_timeout(round_jiffies_up(next)); + mod_timer(&q->timeout, next); + } else { + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_tag_idle(hctx); + } } /* @@ -1439,6 +1454,56 @@ static void blk_mq_map_swqueue(struct request_queue *q) } } +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + bool shared; + int i; + + if (set->tag_list.next == set->tag_list.prev) + shared = false; + else + shared = true; + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_freeze_queue(q); + + queue_for_each_hw_ctx(q, hctx, i) { + if (shared) + hctx->flags |= BLK_MQ_F_TAG_SHARED; + else + hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } + blk_mq_unfreeze_queue(q); + } +} + +static void blk_mq_del_queue_tag_set(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + + 
blk_mq_freeze_queue(q); + + mutex_lock(&set->tag_list_lock); + list_del_init(&q->tag_set_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); + + blk_mq_unfreeze_queue(q); +} + +static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, + struct request_queue *q) +{ + q->tag_set = set; + + mutex_lock(&set->tag_list_lock); + list_add_tail(&q->tag_set_list, &set->tag_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); +} + struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; @@ -1464,6 +1529,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) goto err_hctxs; + atomic_set(&hctxs[i]->nr_active, 0); hctxs[i]->numa_node = NUMA_NO_NODE; hctxs[i]->queue_num = i; } @@ -1516,6 +1582,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) list_add_tail(&q->all_q_node, &all_q_list); mutex_unlock(&all_q_mutex); + blk_mq_add_queue_tag_set(set, q); + return q; err_flush_rq: @@ -1543,6 +1611,8 @@ void blk_mq_free_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + blk_mq_del_queue_tag_set(q); + queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); @@ -1635,6 +1705,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) goto out_unwind; } + mutex_init(&set->tag_list_lock); + INIT_LIST_HEAD(&set->tag_list); + return 0; out_unwind: diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 448745683d28..43e8b515806f 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -166,6 +166,17 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); +unsigned long blk_rq_timeout(unsigned long timeout) +{ + unsigned long maxt; + + maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT); + if (time_after(timeout, maxt)) + timeout = maxt; + + return timeout; +} + /** * blk_add_timer - Start timeout timer for a single request * @req: request that is about to start running. @@ -200,7 +211,7 @@ void blk_add_timer(struct request *req) * than an existing one, modify the timer. Round up to next nearest * second. 
*/ - expiry = round_jiffies_up(req->deadline); + expiry = blk_rq_timeout(round_jiffies_up(req->deadline)); if (!timer_pending(&q->timeout) || time_before(expiry, q->timeout.expires)) { diff --git a/block/blk.h b/block/blk.h index 79be2cbce7fd..95cab70000e3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -9,6 +9,9 @@ /* Number of requests a "batching" process may submit */ #define BLK_BATCH_REQ 32 +/* Max future timer expiry for timeouts */ +#define BLK_MAX_TIMEOUT (5 * HZ) + extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; @@ -37,6 +40,7 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, unsigned int *next_set); +unsigned long blk_rq_timeout(unsigned long timeout); void blk_add_timer(struct request *req); void blk_delete_timer(struct request *); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f83d15f6e1c1..379f88d5c44d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -48,6 +48,8 @@ struct blk_mq_hw_ctx { unsigned int numa_node; unsigned int cmd_size; /* per-request extra data */ + atomic_t nr_active; + struct blk_mq_cpu_notifier cpu_notifier; struct kobject kobj; }; @@ -64,6 +66,9 @@ struct blk_mq_tag_set { void *driver_data; struct blk_mq_tags **tags; + + struct mutex tag_list_lock; + struct list_head tag_list; }; typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); @@ -126,8 +131,10 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, + BLK_MQ_F_TAG_SHARED = 1 << 2, BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_MAX_DEPTH = 2048, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index aa0eaa2d0bd8..d8e4cea23a25 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_END, /* last of chain of requests */ __REQ_HASHED, /* on IO scheduler merge hash */ + __REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_END (1ULL << __REQ_END) #define REQ_HASHED (1ULL << __REQ_HASHED) +#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 94b27210641b..6bc011a09e82 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -481,6 +481,9 @@ struct request_queue { wait_queue_head_t mq_freeze_wq; struct percpu_counter mq_usage_counter; struct list_head all_q_node; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ -- cgit v1.2.3 From 19824d5eeecedfb46639961da1b7a21ba3179930 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Thu, 8 May 2014 14:06:22 +0200 Subject: usb: gadget: OS String support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a custom (non-USB IF) extension to the USB standard: http://msdn.microsoft.com/library/windows/hardware/gg463182 They grant permission to use the specification - there is "Microsoft OS Descriptor Specification License Agreement" under the link mentioned above, and its Section 2 "Grant of License", letter (b) reads: "Patent license. 
Microsoft hereby grants to You a nonexclusive, royalty-free, nontransferable, worldwide license under Microsoft’s patents embodied solely within the Specification and that are owned or licensable by Microsoft to make, use, import, offer to sell, sell and distribute directly or indirectly to Your Licensees Your Implementation. You may sublicense this patent license to Your Licensees under the same terms and conditions." The said extension is maintained by Microsoft for Microsoft. Yet it is fairly common for various devices to use it, and a popular proprietary operating system expects devices to provide "OS descriptors", so Linux-based USB gadgets whishing to be able to talk to a variety of operating systems should be able to provide the "OS descriptors". This patch adds optional support for gadgets whishing to expose the so called "OS String" under index 0xEE of language 0. The contents of the string is generated based on the qw_sign array and b_vendor_code. Interested gadgets need to set the cdev->use_os_string flag, fill cdev->qw_sign with appropriate values and fill cdev->b_vendor_code with a value of their choice. This patch does not however implement responding to any vendor-specific USB requests. Signed-off-by: Andrzej Pietrasiewicz Acked-by: Michal Nazarewicz Signed-off-by: Felipe Balbi --- drivers/usb/gadget/composite.c | 29 +++++++++++++++++++++++++++++ include/linux/usb/composite.h | 11 +++++++++++ 2 files changed, 40 insertions(+) (limited to 'include/linux') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 8060de6562cd..2f87b1697bf5 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -21,6 +21,22 @@ #include #include +/** + * struct usb_os_string - represents OS String to be reported by a gadget + * @bLength: total length of the entire descritor, always 0x12 + * @bDescriptorType: USB_DT_STRING + * @qwSignature: the OS String proper + * @bMS_VendorCode: code used by the host for subsequent requests + * @bPad: not used, must be zero + */ +struct usb_os_string { + __u8 bLength; + __u8 bDescriptorType; + __u8 qwSignature[OS_STRING_QW_SIGN_LEN]; + __u8 bMS_VendorCode; + __u8 bPad; +} __packed; + /* * The code in this file is utility code, used to build a gadget driver * from one or more "function" drivers, one or more "configuration" @@ -961,6 +977,19 @@ static int get_string(struct usb_composite_dev *cdev, return s->bLength; } + if (cdev->use_os_string && language == 0 && id == OS_STRING_IDX) { + struct usb_os_string *b = buf; + b->bLength = sizeof(*b); + b->bDescriptorType = USB_DT_STRING; + compiletime_assert( + sizeof(b->qwSignature) == sizeof(cdev->qw_sign), + "qwSignature size must be equal to qw_sign"); + memcpy(&b->qwSignature, cdev->qw_sign, sizeof(b->qwSignature)); + b->bMS_VendorCode = cdev->b_vendor_code; + b->bPad = 0; + return sizeof(*b); + } + list_for_each_entry(uc, &cdev->gstrings, list) { struct usb_gadget_strings **sp; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index d3ca3b53837c..7d29ee9363e8 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -335,11 +335,17 @@ static inline struct usb_composite_driver *to_cdriver( return container_of(gdrv, struct usb_composite_driver, gadget_driver); } +#define OS_STRING_QW_SIGN_LEN 14 +#define OS_STRING_IDX 0xEE + /** * struct usb_composite_device - represents one composite usb gadget * @gadget: read-only, abstracts the gadget's usb peripheral controller * @req: used for control responses; buffer is 
pre-allocated * @config: the currently active configuration + * @qw_sign: qwSignature part of the OS string + * @b_vendor_code: bMS_VendorCode part of the OS string + * @use_os_string: false by default, interested gadgets set it * * One of these devices is allocated and initialized before the * associated device driver's bind() is called. @@ -372,6 +378,11 @@ struct usb_composite_dev { struct usb_configuration *config; + /* OS String is a custom (yet popular) extension to the USB standard. */ + u8 qw_sign[OS_STRING_QW_SIGN_LEN]; + u8 b_vendor_code; + unsigned int use_os_string:1; + /* private: */ /* internals */ unsigned int suspended:1; -- cgit v1.2.3 From 37a3a533429ef9b3cc9f15a656c19623f0e88df7 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Thu, 8 May 2014 14:06:23 +0200 Subject: usb: gadget: OS Feature Descriptors support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a custom (non-USB IF) extension to the USB standard: http://msdn.microsoft.com/library/windows/hardware/gg463182 They grant permission to use the specification - there is "Microsoft OS Descriptor Specification License Agreement" under the link mentioned above, and its Section 2 "Grant of License", letter (b) reads: "Patent license. Microsoft hereby grants to You a nonexclusive, royalty-free, nontransferable, worldwide license under Microsoft’s patents embodied solely within the Specification and that are owned or licensable by Microsoft to make, use, import, offer to sell, sell and distribute directly or indirectly to Your Licensees Your Implementation. You may sublicense this patent license to Your Licensees under the same terms and conditions." The said extension is maintained by Microsoft for Microsoft. Yet it is fairly common for various devices to use it, and a popular proprietary operating system expects devices to provide "OS descriptors", so Linux-based USB gadgets whishing to be able to talk to a variety of operating systems should be able to provide the "OS descriptors". This patch adds optional support for gadgets whishing to expose the so called "OS Feature Descriptors", that is "Extended Compatibility ID" and "Extended Properties". Hosts which do request "OS descriptors" from gadgets do so during the enumeration phase and before the configuration is set with SET_CONFIGURATION. What is more, those hosts never ask for configurations at indices other than 0. Therefore, gadgets whishing to provide "OS descriptors" must designate one configuration to be used with this kind of hosts - this is what os_desc_config is added for in struct usb_composite_dev. There is an additional advantage to it: if a gadget provides "OS descriptors" and designates one configuration to be used with such non-USB-compliant hosts it can invoke "usb_add_config" in any order because the designated configuration will be reported to be at index 0 anyway. This patch also adds handling vendor-specific requests addressed at device or interface and related to handling "OS descriptors". 
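For orientation, here is a hedged sketch of the opt-in that the previous patch describes ("Interested gadgets need to set the cdev->use_os_string flag, fill cdev->qw_sign with appropriate values and fill cdev->b_vendor_code with a value of their choice"). The field names and OS_STRING_QW_SIGN_LEN come from that patch; the UTF-16LE "MSFT100" signature and the 0xcd vendor code are conventional example values, not something these patches mandate.

	/*
	 * Sketch only: opting a composite gadget in to the OS String added by
	 * the previous patch. use_os_string, qw_sign, b_vendor_code and
	 * OS_STRING_QW_SIGN_LEN are from the diff; the signature bytes and the
	 * vendor code below are illustrative assumptions.
	 */
	#include <linux/usb/composite.h>
	#include <linux/string.h>

	static void example_enable_os_string(struct usb_composite_dev *cdev)
	{
		/* "MSFT100" encoded as UTF-16LE: 14 bytes, as get_string() copies */
		static const u8 sign[OS_STRING_QW_SIGN_LEN] = {
			'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0
		};

		memcpy(cdev->qw_sign, sign, sizeof(sign));
		cdev->b_vendor_code = 0xcd;	/* host reuses this as bRequest later */
		cdev->use_os_string = 1;
	}

A gadget would call something like this from its bind() callback before the host starts enumeration.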
Signed-off-by: Andrzej Pietrasiewicz Acked-by: Michal Nazarewicz Signed-off-by: Felipe Balbi --- drivers/usb/gadget/composite.c | 288 ++++++++++++++++++++++++++++++++++++++++- drivers/usb/gadget/u_os_desc.h | 90 +++++++++++++ include/linux/usb/composite.h | 58 +++++++++ 3 files changed, 435 insertions(+), 1 deletion(-) create mode 100644 drivers/usb/gadget/u_os_desc.h (limited to 'include/linux') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 2f87b1697bf5..042c66b71df8 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -21,6 +21,8 @@ #include #include +#include "u_os_desc.h" + /** * struct usb_os_string - represents OS String to be reported by a gadget * @bLength: total length of the entire descritor, always 0x12 @@ -438,6 +440,7 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; + struct list_head *pos; u8 type = w_value >> 8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; @@ -456,7 +459,20 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) /* This is a lookup by config *INDEX* */ w_value &= 0xff; - list_for_each_entry(c, &cdev->configs, list) { + + pos = &cdev->configs; + c = cdev->os_desc_config; + if (c) + goto check_config; + + while ((pos = pos->next) != &cdev->configs) { + c = list_entry(pos, typeof(*c), list); + + /* skip OS Descriptors config which is handled separately */ + if (c == cdev->os_desc_config) + continue; + +check_config: /* ignore configs that won't work at this speed */ switch (speed) { case USB_SPEED_SUPER: @@ -1236,6 +1252,158 @@ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req) req->status, req->actual, req->length); } +static int count_ext_compat(struct usb_configuration *c) +{ + int i, res; + + res = 0; + for (i = 0; i < c->next_interface_id; ++i) { + struct usb_function *f; + int j; + + f = c->interface[i]; + for (j = 0; j < f->os_desc_n; ++j) { + struct usb_os_desc *d; + + if (i != f->os_desc_table[j].if_id) + continue; + d = f->os_desc_table[j].os_desc; + if (d && d->ext_compat_id) + ++res; + } + } + BUG_ON(res > 255); + return res; +} + +static void fill_ext_compat(struct usb_configuration *c, u8 *buf) +{ + int i, count; + + count = 16; + for (i = 0; i < c->next_interface_id; ++i) { + struct usb_function *f; + int j; + + f = c->interface[i]; + for (j = 0; j < f->os_desc_n; ++j) { + struct usb_os_desc *d; + + if (i != f->os_desc_table[j].if_id) + continue; + d = f->os_desc_table[j].os_desc; + if (d && d->ext_compat_id) { + *buf++ = i; + *buf++ = 0x01; + memcpy(buf, d->ext_compat_id, 16); + buf += 22; + } else { + ++buf; + *buf = 0x01; + buf += 23; + } + count += 24; + if (count >= 4096) + return; + } + } +} + +static int count_ext_prop(struct usb_configuration *c, int interface) +{ + struct usb_function *f; + int j, res; + + res = 0; + + f = c->interface[interface]; + for (j = 0; j < f->os_desc_n; ++j) { + struct usb_os_desc *d; + + if (interface != f->os_desc_table[j].if_id) + continue; + d = f->os_desc_table[j].os_desc; + if (d && d->ext_compat_id) + return d->ext_prop_count; + } + return res; +} + +static int len_ext_prop(struct usb_configuration *c, int interface) +{ + struct usb_function *f; + struct usb_os_desc *d; + int j, res; + + res = 10; /* header length */ + f = c->interface[interface]; + for (j = 0; j < f->os_desc_n; ++j) { + if (interface != f->os_desc_table[j].if_id) + continue; + d = f->os_desc_table[j].os_desc; + if (d) + 
return min(res + d->ext_prop_len, 4096); + } + return res; +} + +static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) +{ + struct usb_function *f; + struct usb_os_desc *d; + struct usb_os_desc_ext_prop *ext_prop; + int j, count, n, ret; + u8 *start = buf; + + f = c->interface[interface]; + for (j = 0; j < f->os_desc_n; ++j) { + if (interface != f->os_desc_table[j].if_id) + continue; + d = f->os_desc_table[j].os_desc; + if (d) + list_for_each_entry(ext_prop, &d->ext_prop, entry) { + /* 4kB minus header length */ + n = buf - start; + if (n >= 4086) + return 0; + + count = ext_prop->data_len + + ext_prop->name_len + 14; + if (count > 4086 - n) + return -EINVAL; + usb_ext_prop_put_size(buf, count); + usb_ext_prop_put_type(buf, ext_prop->type); + ret = usb_ext_prop_put_name(buf, ext_prop->name, + ext_prop->name_len); + if (ret < 0) + return ret; + switch (ext_prop->type) { + case USB_EXT_PROP_UNICODE: + case USB_EXT_PROP_UNICODE_ENV: + case USB_EXT_PROP_UNICODE_LINK: + usb_ext_prop_put_unicode(buf, ret, + ext_prop->data, + ext_prop->data_len); + break; + case USB_EXT_PROP_BINARY: + usb_ext_prop_put_binary(buf, ret, + ext_prop->data, + ext_prop->data_len); + break; + case USB_EXT_PROP_LE32: + /* not implemented */ + case USB_EXT_PROP_BE32: + /* not implemented */ + default: + return -EINVAL; + } + buf += count; + } + } + + return 0; +} + /* * The setup() callback implements all the ep0 functionality that's * not handled lower down, in hardware or the hardware driver(like @@ -1445,6 +1613,91 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) break; default: unknown: + /* + * OS descriptors handling + */ + if (cdev->use_os_string && cdev->os_desc_config && + (ctrl->bRequest & USB_TYPE_VENDOR) && + ctrl->bRequest == cdev->b_vendor_code) { + struct usb_request *req; + struct usb_configuration *os_desc_cfg; + u8 *buf; + int interface; + int count = 0; + + req = cdev->os_desc_req; + req->complete = composite_setup_complete; + buf = req->buf; + os_desc_cfg = cdev->os_desc_config; + memset(buf, 0, w_length); + buf[5] = 0x01; + switch (ctrl->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + if (w_index != 0x4 || (w_value >> 8)) + break; + buf[6] = w_index; + if (w_length == 0x10) { + /* Number of ext compat interfaces */ + count = count_ext_compat(os_desc_cfg); + buf[8] = count; + count *= 24; /* 24 B/ext compat desc */ + count += 16; /* header */ + put_unaligned_le32(count, buf); + value = w_length; + } else { + /* "extended compatibility ID"s */ + count = count_ext_compat(os_desc_cfg); + buf[8] = count; + count *= 24; /* 24 B/ext compat desc */ + count += 16; /* header */ + put_unaligned_le32(count, buf); + buf += 16; + fill_ext_compat(os_desc_cfg, buf); + value = w_length; + } + break; + case USB_RECIP_INTERFACE: + if (w_index != 0x5 || (w_value >> 8)) + break; + interface = w_value & 0xFF; + buf[6] = w_index; + if (w_length == 0x0A) { + count = count_ext_prop(os_desc_cfg, + interface); + put_unaligned_le16(count, buf + 8); + count = len_ext_prop(os_desc_cfg, + interface); + put_unaligned_le32(count, buf); + + value = w_length; + } else { + count = count_ext_prop(os_desc_cfg, + interface); + put_unaligned_le16(count, buf + 8); + count = len_ext_prop(os_desc_cfg, + interface); + put_unaligned_le32(count, buf); + buf += 10; + value = fill_ext_prop(os_desc_cfg, + interface, buf); + if (value < 0) + return value; + + value = w_length; + } + break; + } + req->length = value; + req->zero = value < w_length; + value = usb_ep_queue(gadget->ep0, 
req, GFP_ATOMIC); + if (value < 0) { + DBG(cdev, "ep_queue --> %d\n", value); + req->status = 0; + composite_setup_complete(gadget->ep0, req); + } + return value; + } + VDBG(cdev, "non-core control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, @@ -1668,6 +1921,29 @@ fail: return ret; } +int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, + struct usb_ep *ep0) +{ + int ret = 0; + + cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); + if (!cdev->os_desc_req) { + ret = PTR_ERR(cdev->os_desc_req); + goto end; + } + + /* OS feature descriptor length <= 4kB */ + cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); + if (!cdev->os_desc_req->buf) { + ret = PTR_ERR(cdev->os_desc_req->buf); + kfree(cdev->os_desc_req); + goto end; + } + cdev->os_desc_req->complete = composite_setup_complete; +end: + return ret; +} + void composite_dev_cleanup(struct usb_composite_dev *cdev) { struct usb_gadget_string_container *uc, *tmp; @@ -1676,6 +1952,10 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev) list_del(&uc->list); kfree(uc); } + if (cdev->os_desc_req) { + kfree(cdev->os_desc_req->buf); + usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req); + } if (cdev->req) { kfree(cdev->req->buf); usb_ep_free_request(cdev->gadget->ep0, cdev->req); @@ -1713,6 +1993,12 @@ static int composite_bind(struct usb_gadget *gadget, if (status < 0) goto fail; + if (cdev->use_os_string) { + status = composite_os_desc_req_prepare(cdev, gadget->ep0); + if (status) + goto fail; + } + update_unchanged_dev_desc(&cdev->desc, composite->dev); /* has userspace failed to provide a serial number? */ diff --git a/drivers/usb/gadget/u_os_desc.h b/drivers/usb/gadget/u_os_desc.h new file mode 100644 index 000000000000..ea5cf8c2da28 --- /dev/null +++ b/drivers/usb/gadget/u_os_desc.h @@ -0,0 +1,90 @@ +/* + * u_os_desc.h + * + * Utility definitions for "OS Descriptors" support + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __U_OS_DESC_H__ +#define __U_OS_DESC_H__ + +#include +#include + +#define USB_EXT_PROP_DW_SIZE 0 +#define USB_EXT_PROP_DW_PROPERTY_DATA_TYPE 4 +#define USB_EXT_PROP_W_PROPERTY_NAME_LENGTH 8 +#define USB_EXT_PROP_B_PROPERTY_NAME 10 +#define USB_EXT_PROP_DW_PROPERTY_DATA_LENGTH 10 +#define USB_EXT_PROP_B_PROPERTY_DATA 14 + +#define USB_EXT_PROP_RESERVED 0 +#define USB_EXT_PROP_UNICODE 1 +#define USB_EXT_PROP_UNICODE_ENV 2 +#define USB_EXT_PROP_BINARY 3 +#define USB_EXT_PROP_LE32 4 +#define USB_EXT_PROP_BE32 5 +#define USB_EXT_PROP_UNICODE_LINK 6 +#define USB_EXT_PROP_UNICODE_MULTI 7 + +static inline void usb_ext_prop_put_size(u8 *buf, int dw_size) +{ + put_unaligned_le32(dw_size, &buf[USB_EXT_PROP_DW_SIZE]); +} + +static inline void usb_ext_prop_put_type(u8 *buf, int type) +{ + put_unaligned_le32(type, &buf[USB_EXT_PROP_DW_PROPERTY_DATA_TYPE]); +} + +static inline int usb_ext_prop_put_name(u8 *buf, const char *name, int pnl) +{ + int result; + + put_unaligned_le16(pnl, &buf[USB_EXT_PROP_W_PROPERTY_NAME_LENGTH]); + result = utf8s_to_utf16s(name, strlen(name), UTF16_LITTLE_ENDIAN, + (wchar_t *) &buf[USB_EXT_PROP_B_PROPERTY_NAME], pnl - 2); + if (result < 0) + return result; + + put_unaligned_le16(0, &buf[USB_EXT_PROP_B_PROPERTY_NAME + pnl]); + + return pnl; +} + +static inline void usb_ext_prop_put_binary(u8 *buf, int pnl, const u8 *data, + int data_len) +{ + put_unaligned_le32(data_len, + &buf[USB_EXT_PROP_DW_PROPERTY_DATA_LENGTH + pnl]); + memcpy(&buf[USB_EXT_PROP_B_PROPERTY_DATA + pnl], data, data_len); +} + +static inline int usb_ext_prop_put_unicode(u8 *buf, int pnl, const char *string, + int data_len) +{ + int result; + put_unaligned_le32(data_len, + &buf[USB_EXT_PROP_DW_PROPERTY_DATA_LENGTH + pnl]); + + result = utf8s_to_utf16s(string, data_len >> 1, UTF16_LITTLE_ENDIAN, + (wchar_t *) &buf[USB_EXT_PROP_B_PROPERTY_DATA + pnl], + data_len - 2); + if (result < 0) + return result; + + put_unaligned_le16(0, + &buf[USB_EXT_PROP_B_PROPERTY_DATA + pnl + data_len]); + + return data_len; +} + +#endif /* __U_OS_DESC_H__ */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 7d29ee9363e8..549f5382b01a 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -56,6 +56,53 @@ #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; +/** + * struct usb_os_desc_ext_prop - describes one "Extended Property" + * @entry: used to keep a list of extended properties + * @type: Extended Property type + * @name_len: Extended Property unicode name length, including terminating '\0' + * @name: Extended Property name + * @data_len: Length of Extended Property blob (for unicode store double len) + * @data: Extended Property blob + */ +struct usb_os_desc_ext_prop { + struct list_head entry; + u8 type; + int name_len; + char *name; + int data_len; + char *data; +}; + +/** + * struct usb_os_desc - describes OS descriptors associated with one interface + * @ext_compat_id: 16 bytes of "Compatible ID" and "Subcompatible ID" + * @ext_prop: Extended Properties list + * @ext_prop_len: Total length of Extended Properties blobs + * @ext_prop_count: Number of Extended Properties + */ +struct usb_os_desc { + char *ext_compat_id; + struct list_head ext_prop; + int ext_prop_len; + int ext_prop_count; +}; + +/** + * struct usb_os_desc_table - describes OS descriptors associated with one + * interface of a usb_function + * @if_id: Interface id + * @os_desc: "Extended Compatibility ID" and "Extended Properties" of the + * interface + 
* + * Each interface can have at most one "Extended Compatibility ID" and a + * number of "Extended Properties". + */ +struct usb_os_desc_table { + int if_id; + struct usb_os_desc *os_desc; +}; + /** * struct usb_function - describes one function of a configuration * @name: For diagnostics, identifies the function. @@ -73,6 +120,10 @@ struct usb_configuration; * be available at super speed. * @config: assigned when @usb_add_function() is called; this is the * configuration with which this function is associated. + * @os_desc_table: Table of (interface id, os descriptors) pairs. The function + * can expose more than one interface. If an interface is a member of + * an IAD, only the first interface of IAD has its entry in the table. + * @os_desc_n: Number of entries in os_desc_table * @bind: Before the gadget can register, all of its functions bind() to the * available resources including string and interface identifiers used * in interface or class descriptors; endpoints; I/O buffers; and so on. @@ -129,6 +180,9 @@ struct usb_function { struct usb_configuration *config; + struct usb_os_desc_table *os_desc_table; + unsigned os_desc_n; + /* REVISIT: bind() functions can be marked __init, which * makes trouble for section mismatch analysis. See if * we can't restructure things to avoid mismatching. @@ -342,10 +396,12 @@ static inline struct usb_composite_driver *to_cdriver( * struct usb_composite_device - represents one composite usb gadget * @gadget: read-only, abstracts the gadget's usb peripheral controller * @req: used for control responses; buffer is pre-allocated + * @os_desc_req: used for OS descriptors responses; buffer is pre-allocated * @config: the currently active configuration * @qw_sign: qwSignature part of the OS string * @b_vendor_code: bMS_VendorCode part of the OS string * @use_os_string: false by default, interested gadgets set it + * @os_desc_config: the configuration to be used with OS descriptors * * One of these devices is allocated and initialized before the * associated device driver's bind() is called. @@ -375,12 +431,14 @@ static inline struct usb_composite_driver *to_cdriver( struct usb_composite_dev { struct usb_gadget *gadget; struct usb_request *req; + struct usb_request *os_desc_req; struct usb_configuration *config; /* OS String is a custom (yet popular) extension to the USB standard. */ u8 qw_sign[OS_STRING_QW_SIGN_LEN]; u8 b_vendor_code; + struct usb_configuration *os_desc_config; unsigned int use_os_string:1; /* private: */ -- cgit v1.2.3 From da4243145fb197622425d4c2feff5d6422f2391e Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Thu, 8 May 2014 14:06:26 +0200 Subject: usb: gadget: configfs: OS Extended Compatibility descriptors support Add handling of OS Extended Compatibility descriptors from configfs interface. Hosts which expect the "OS Descriptors" ask only for configurations @ index 0, but linux-based USB devices can provide more than one configuration. This patch adds marking one of gadget's configurations the configuration to be reported at index 0, regardless of the actual sequence of usb_add_config invocations used for adding the configurations. The configuration is selected by creating a symbolic link pointing to it from the "os_desc" directory located at the top of a gadget's directory hierarchy. One kind of "OS Descriptors" are "Extended Compatibility Descriptors", which need to be specified per interface. This patch adds interface. directory in function's configfs directory to represent each interface defined by the function. 
Each interface's directory contains two attributes: "compatible_id" and "sub_compatible_id", which represent 8-byte strings to be reported to the host as the "Compatible ID" and "Sub Compatible ID". Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi --- Documentation/ABI/testing/configfs-usb-gadget | 13 ++ drivers/usb/gadget/configfs.c | 190 ++++++++++++++++++++++++++ drivers/usb/gadget/configfs.h | 12 ++ include/linux/usb/composite.h | 6 + 4 files changed, 221 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget index 0e7b786f24ac..5c0b3e6eb981 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget +++ b/Documentation/ABI/testing/configfs-usb-gadget @@ -62,6 +62,19 @@ KernelVersion: 3.11 Description: This group contains functions available to this USB gadget. +What: /config/usb-gadget/gadget/functions/./interface. +Date: May 2014 +KernelVersion: 3.16 +Description: + This group contains "Feature Descriptors" specific for one + gadget's USB interface or one interface group described + by an IAD. + + The attributes: + + compatible_id - 8-byte string for "Compatible ID" + sub_compatible_id - 8-byte string for "Sub Compatible ID" + What: /config/usb-gadget/gadget/strings Date: Jun 2013 KernelVersion: 3.11 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 8b9e038ac22b..fa6cb06cca09 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -6,6 +6,7 @@ #include #include #include "configfs.h" +#include "u_f.h" int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) @@ -872,10 +873,63 @@ static void os_desc_attr_release(struct config_item *item) kfree(os_desc); } +static int os_desc_link(struct config_item *os_desc_ci, + struct config_item *usb_cfg_ci) +{ + struct gadget_info *gi = container_of(to_config_group(os_desc_ci), + struct gadget_info, os_desc_group); + struct usb_composite_dev *cdev = &gi->cdev; + struct config_usb_cfg *c_target = + container_of(to_config_group(usb_cfg_ci), + struct config_usb_cfg, group); + struct usb_configuration *c; + int ret; + + mutex_lock(&gi->lock); + list_for_each_entry(c, &cdev->configs, list) { + if (c == &c_target->c) + break; + } + if (c != &c_target->c) { + ret = -EINVAL; + goto out; + } + + if (cdev->os_desc_config) { + ret = -EBUSY; + goto out; + } + + cdev->os_desc_config = &c_target->c; + ret = 0; + +out: + mutex_unlock(&gi->lock); + return ret; +} + +static int os_desc_unlink(struct config_item *os_desc_ci, + struct config_item *usb_cfg_ci) +{ + struct gadget_info *gi = container_of(to_config_group(os_desc_ci), + struct gadget_info, os_desc_group); + struct usb_composite_dev *cdev = &gi->cdev; + + mutex_lock(&gi->lock); + if (gi->udc_name) + unregister_gadget(gi); + cdev->os_desc_config = NULL; + WARN_ON(gi->udc_name); + mutex_unlock(&gi->lock); + return 0; +} + static struct configfs_item_operations os_desc_ops = { .release = os_desc_attr_release, .show_attribute = os_desc_attr_show, .store_attribute = os_desc_attr_store, + .allow_link = os_desc_link, + .drop_link = os_desc_unlink, }; static struct config_item_type os_desc_type = { @@ -884,6 +938,133 @@ static struct config_item_type os_desc_type = { .ct_owner = THIS_MODULE, }; +CONFIGFS_ATTR_STRUCT(usb_os_desc); +CONFIGFS_ATTR_OPS(usb_os_desc); + +static struct configfs_item_operations interf_item_ops = { + .show_attribute = usb_os_desc_attr_show, + .store_attribute = usb_os_desc_attr_store, +}; + 
+static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc, + char *page) +{ + memcpy(page, desc->ext_compat_id, 8); + return 8; +} + +static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) +{ + int l; + + l = min_t(int, 8, len); + if (page[l - 1] == '\n') + --l; + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + memcpy(desc->ext_compat_id, page, l); + desc->ext_compat_id[l] = '\0'; + + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + + return len; +} + +static struct usb_os_desc_attribute rndis_grp_attr_compatible_id = + __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR, + rndis_grp_compatible_id_show, + rndis_grp_compatible_id_store); + +static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc, + char *page) +{ + memcpy(page, desc->ext_compat_id + 8, 8); + return 8; +} + +static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) +{ + int l; + + l = min_t(int, 8, len); + if (page[l - 1] == '\n') + --l; + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + memcpy(desc->ext_compat_id + 8, page, l); + desc->ext_compat_id[l + 8] = '\0'; + + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + + return len; +} + +static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id = + __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR, + rndis_grp_sub_compatible_id_show, + rndis_grp_sub_compatible_id_store); + +static struct configfs_attribute *interf_grp_attrs[] = { + &rndis_grp_attr_compatible_id.attr, + &rndis_grp_attr_sub_compatible_id.attr, + NULL +}; + +int usb_os_desc_prepare_interf_dir(struct config_group *parent, + int n_interf, + struct usb_os_desc **desc, + struct module *owner) +{ + struct config_group **f_default_groups, *os_desc_group, + **interface_groups; + struct config_item_type *os_desc_type, *interface_type; + + vla_group(data_chunk); + vla_item(data_chunk, struct config_group *, f_default_groups, 2); + vla_item(data_chunk, struct config_group, os_desc_group, 1); + vla_item(data_chunk, struct config_group *, interface_groups, + n_interf + 1); + vla_item(data_chunk, struct config_item_type, os_desc_type, 1); + vla_item(data_chunk, struct config_item_type, interface_type, 1); + + char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); + if (!vlabuf) + return -ENOMEM; + + f_default_groups = vla_ptr(vlabuf, data_chunk, f_default_groups); + os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); + os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); + interface_groups = vla_ptr(vlabuf, data_chunk, interface_groups); + interface_type = vla_ptr(vlabuf, data_chunk, interface_type); + + parent->default_groups = f_default_groups; + os_desc_type->ct_owner = owner; + config_group_init_type_name(os_desc_group, "os_desc", os_desc_type); + f_default_groups[0] = os_desc_group; + + os_desc_group->default_groups = interface_groups; + interface_type->ct_item_ops = &interf_item_ops; + interface_type->ct_attrs = interf_grp_attrs; + interface_type->ct_owner = owner; + + while (n_interf--) { + struct usb_os_desc *d; + + d = desc[n_interf]; + config_group_init_type_name(&d->group, "", interface_type); + config_item_set_name(&d->group.cg_item, "interface.%d", + n_interf); + interface_groups[n_interf] = &d->group; + } + + return 0; +} +EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); + static int configfs_do_nothing(struct usb_composite_dev *cdev) { WARN_ON(1); @@ -893,6 +1074,9 @@ static int configfs_do_nothing(struct usb_composite_dev 
*cdev) int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *dev); +int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, + struct usb_ep *ep0); + static void purge_configs_funcs(struct gadget_info *gi) { struct usb_configuration *c; @@ -1028,6 +1212,12 @@ static int configfs_composite_bind(struct usb_gadget *gadget, } usb_ep_autoconfig_reset(cdev->gadget); } + if (cdev->use_os_string) { + ret = composite_os_desc_req_prepare(cdev, gadget->ep0); + if (ret) + goto err_purge_funcs; + } + usb_ep_autoconfig_reset(cdev->gadget); return 0; diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h index a7b564a913d1..a14ac792c698 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h @@ -1,6 +1,18 @@ #ifndef USB__GADGET__CONFIGFS__H #define USB__GADGET__CONFIGFS__H +#include + void unregister_gadget_item(struct config_item *item); +int usb_os_desc_prepare_interf_dir(struct config_group *parent, + int n_interf, + struct usb_os_desc **desc, + struct module *owner); + +static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) +{ + return container_of(to_config_group(item), struct usb_os_desc, group); +} + #endif /* USB__GADGET__CONFIGFS__H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 549f5382b01a..9c3903d76781 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -80,12 +80,16 @@ struct usb_os_desc_ext_prop { * @ext_prop: Extended Properties list * @ext_prop_len: Total length of Extended Properties blobs * @ext_prop_count: Number of Extended Properties + * @opts_mutex: Optional mutex protecting config data of a usb_function_instance + * @group: Represents OS descriptors associated with an interface in configfs */ struct usb_os_desc { char *ext_compat_id; struct list_head ext_prop; int ext_prop_len; int ext_prop_count; + struct mutex *opts_mutex; + struct config_group group; }; /** @@ -381,6 +385,8 @@ extern void usb_composite_unregister(struct usb_composite_driver *driver); extern void usb_composite_setup_continue(struct usb_composite_dev *cdev); extern int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *cdev); +extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, + struct usb_ep *ep0); void composite_dev_cleanup(struct usb_composite_dev *cdev); static inline struct usb_composite_driver *to_cdriver( -- cgit v1.2.3 From 7419485f197c436d41535df78ddea1085042d271 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Thu, 8 May 2014 14:06:28 +0200 Subject: usb: gadget: configfs: OS Extended Properties descriptors support Add handling of OS Extended Properties descriptors from configfs interface. One kind of "OS Descriptors" are "Extended Properties" descriptors, which need to be specified per interface or per group of interfaces described by an IAD. This patch adds support for creating subdirectories in interface. directory located in the function's directory. Names of subdirectories created become names of properties. Each property contains two attributes: "type" and "data". The type can be a numeric value 1..7 while data is a blob interpreted depending on the type specified. 
The types are: 1 - unicode string 2 - unicode string with environment variables 3 - binary 4 - little-endian 32-bit 5 - big-endian 32-bit 6 - unicode string with a symbolic link 7 - multiple unicode strings Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi --- Documentation/ABI/testing/configfs-usb-gadget | 21 +++ drivers/usb/gadget/configfs.c | 201 ++++++++++++++++++++++++++ include/linux/usb/composite.h | 4 + 3 files changed, 226 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget index 5c0b3e6eb981..95a36589a66b 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget +++ b/Documentation/ABI/testing/configfs-usb-gadget @@ -75,6 +75,27 @@ Description: compatible_id - 8-byte string for "Compatible ID" sub_compatible_id - 8-byte string for "Sub Compatible ID" +What: /config/usb-gadget/gadget/functions/./interface./ +Date: May 2014 +KernelVersion: 3.16 +Description: + This group contains "Extended Property Descriptors" specific for one + gadget's USB interface or one interface group described + by an IAD. + + The attributes: + + type - value 1..7 for interpreting the data + 1: unicode string + 2: unicode string with environment variable + 3: binary + 4: little-endian 32-bit + 5: big-endian 32-bit + 6: unicode string with a symbolic link + 7: multiple unicode strings + data - blob of data to be interpreted depending on + type + What: /config/usb-gadget/gadget/strings Date: Jun 2013 KernelVersion: 3.11 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index fa6cb06cca09..2ddcd635ca2a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -7,6 +7,7 @@ #include #include "configfs.h" #include "u_f.h" +#include "u_os_desc.h" int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) @@ -941,6 +942,204 @@ static struct config_item_type os_desc_type = { CONFIGFS_ATTR_STRUCT(usb_os_desc); CONFIGFS_ATTR_OPS(usb_os_desc); + +static inline struct usb_os_desc_ext_prop +*to_usb_os_desc_ext_prop(struct config_item *item) +{ + return container_of(item, struct usb_os_desc_ext_prop, item); +} + +CONFIGFS_ATTR_STRUCT(usb_os_desc_ext_prop); +CONFIGFS_ATTR_OPS(usb_os_desc_ext_prop); + +static ssize_t ext_prop_type_show(struct usb_os_desc_ext_prop *ext_prop, + char *page) +{ + return sprintf(page, "%d", ext_prop->type); +} + +static ssize_t ext_prop_type_store(struct usb_os_desc_ext_prop *ext_prop, + const char *page, size_t len) +{ + struct usb_os_desc *desc = to_usb_os_desc(ext_prop->item.ci_parent); + u8 type; + int ret; + + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + ret = kstrtou8(page, 0, &type); + if (ret) + goto end; + if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI) { + ret = -EINVAL; + goto end; + } + + if ((ext_prop->type == USB_EXT_PROP_BINARY || + ext_prop->type == USB_EXT_PROP_LE32 || + ext_prop->type == USB_EXT_PROP_BE32) && + (type == USB_EXT_PROP_UNICODE || + type == USB_EXT_PROP_UNICODE_ENV || + type == USB_EXT_PROP_UNICODE_LINK)) + ext_prop->data_len <<= 1; + else if ((ext_prop->type == USB_EXT_PROP_UNICODE || + ext_prop->type == USB_EXT_PROP_UNICODE_ENV || + ext_prop->type == USB_EXT_PROP_UNICODE_LINK) && + (type == USB_EXT_PROP_BINARY || + type == USB_EXT_PROP_LE32 || + type == USB_EXT_PROP_BE32)) + ext_prop->data_len >>= 1; + ext_prop->type = type; + ret = len; + +end: + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + return ret; +} + +static ssize_t 
ext_prop_data_show(struct usb_os_desc_ext_prop *ext_prop, + char *page) +{ + int len = ext_prop->data_len; + + if (ext_prop->type == USB_EXT_PROP_UNICODE || + ext_prop->type == USB_EXT_PROP_UNICODE_ENV || + ext_prop->type == USB_EXT_PROP_UNICODE_LINK) + len >>= 1; + memcpy(page, ext_prop->data, len); + + return len; +} + +static ssize_t ext_prop_data_store(struct usb_os_desc_ext_prop *ext_prop, + const char *page, size_t len) +{ + struct usb_os_desc *desc = to_usb_os_desc(ext_prop->item.ci_parent); + char *new_data; + size_t ret_len = len; + + if (page[len - 1] == '\n' || page[len - 1] == '\0') + --len; + new_data = kzalloc(len, GFP_KERNEL); + if (!new_data) + return -ENOMEM; + + memcpy(new_data, page, len); + + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + kfree(ext_prop->data); + ext_prop->data = new_data; + desc->ext_prop_len -= ext_prop->data_len; + ext_prop->data_len = len; + desc->ext_prop_len += ext_prop->data_len; + if (ext_prop->type == USB_EXT_PROP_UNICODE || + ext_prop->type == USB_EXT_PROP_UNICODE_ENV || + ext_prop->type == USB_EXT_PROP_UNICODE_LINK) { + desc->ext_prop_len -= ext_prop->data_len; + ext_prop->data_len <<= 1; + ext_prop->data_len += 2; + desc->ext_prop_len += ext_prop->data_len; + } + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + return ret_len; +} + +static struct usb_os_desc_ext_prop_attribute ext_prop_type = + __CONFIGFS_ATTR(type, S_IRUGO | S_IWUSR, + ext_prop_type_show, ext_prop_type_store); + +static struct usb_os_desc_ext_prop_attribute ext_prop_data = + __CONFIGFS_ATTR(data, S_IRUGO | S_IWUSR, + ext_prop_data_show, ext_prop_data_store); + +static struct configfs_attribute *ext_prop_attrs[] = { + &ext_prop_type.attr, + &ext_prop_data.attr, + NULL, +}; + +static void usb_os_desc_ext_prop_release(struct config_item *item) +{ + struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item); + + kfree(ext_prop); /* frees a whole chunk */ +} + +static struct configfs_item_operations ext_prop_ops = { + .release = usb_os_desc_ext_prop_release, + .show_attribute = usb_os_desc_ext_prop_attr_show, + .store_attribute = usb_os_desc_ext_prop_attr_store, +}; + +static struct config_item *ext_prop_make( + struct config_group *group, + const char *name) +{ + struct usb_os_desc_ext_prop *ext_prop; + struct config_item_type *ext_prop_type; + struct usb_os_desc *desc; + char *vlabuf; + + vla_group(data_chunk); + vla_item(data_chunk, struct usb_os_desc_ext_prop, ext_prop, 1); + vla_item(data_chunk, struct config_item_type, ext_prop_type, 1); + + vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); + if (!vlabuf) + return ERR_PTR(-ENOMEM); + + ext_prop = vla_ptr(vlabuf, data_chunk, ext_prop); + ext_prop_type = vla_ptr(vlabuf, data_chunk, ext_prop_type); + + desc = container_of(group, struct usb_os_desc, group); + ext_prop_type->ct_item_ops = &ext_prop_ops; + ext_prop_type->ct_attrs = ext_prop_attrs; + ext_prop_type->ct_owner = desc->owner; + + config_item_init_type_name(&ext_prop->item, name, ext_prop_type); + + ext_prop->name = kstrdup(name, GFP_KERNEL); + if (!ext_prop->name) { + kfree(vlabuf); + return ERR_PTR(-ENOMEM); + } + desc->ext_prop_len += 14; + ext_prop->name_len = 2 * strlen(ext_prop->name) + 2; + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + desc->ext_prop_len += ext_prop->name_len; + list_add_tail(&ext_prop->entry, &desc->ext_prop); + ++desc->ext_prop_count; + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + + return &ext_prop->item; +} + +static void ext_prop_drop(struct config_group *group, struct config_item 
*item) +{ + struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item); + struct usb_os_desc *desc = to_usb_os_desc(&group->cg_item); + + if (desc->opts_mutex) + mutex_lock(desc->opts_mutex); + list_del(&ext_prop->entry); + --desc->ext_prop_count; + kfree(ext_prop->name); + desc->ext_prop_len -= (ext_prop->name_len + ext_prop->data_len + 14); + if (desc->opts_mutex) + mutex_unlock(desc->opts_mutex); + config_item_put(item); +} + +static struct configfs_group_operations interf_grp_ops = { + .make_item = &ext_prop_make, + .drop_item = &ext_prop_drop, +}; + static struct configfs_item_operations interf_item_ops = { .show_attribute = usb_os_desc_attr_show, .store_attribute = usb_os_desc_attr_store, @@ -1048,6 +1247,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, os_desc_group->default_groups = interface_groups; interface_type->ct_item_ops = &interf_item_ops; + interface_type->ct_group_ops = &interf_grp_ops; interface_type->ct_attrs = interf_grp_attrs; interface_type->ct_owner = owner; @@ -1055,6 +1255,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, struct usb_os_desc *d; d = desc[n_interf]; + d->owner = owner; config_group_init_type_name(&d->group, "", interface_type); config_item_set_name(&d->group.cg_item, "interface.%d", n_interf); diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 9c3903d76781..7373203140e7 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -64,6 +64,7 @@ struct usb_configuration; * @name: Extended Property name * @data_len: Length of Extended Property blob (for unicode store double len) * @data: Extended Property blob + * @item: Represents this Extended Property in configfs */ struct usb_os_desc_ext_prop { struct list_head entry; @@ -72,6 +73,7 @@ struct usb_os_desc_ext_prop { char *name; int data_len; char *data; + struct config_item item; }; /** @@ -82,6 +84,7 @@ struct usb_os_desc_ext_prop { * @ext_prop_count: Number of Extended Properties * @opts_mutex: Optional mutex protecting config data of a usb_function_instance * @group: Represents OS descriptors associated with an interface in configfs + * @owner: Module associated with this OS descriptor */ struct usb_os_desc { char *ext_compat_id; @@ -90,6 +93,7 @@ struct usb_os_desc { int ext_prop_count; struct mutex *opts_mutex; struct config_group group; + struct module *owner; }; /** -- cgit v1.2.3 From afea227fd4acf4f097a9e77bbc2f07d4856ebd01 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 12 Mar 2014 07:10:41 -0700 Subject: rcutorture: Export RCU grace-period kthread wait state to rcutorture This commit allows rcutorture to print additional state for the RCU grace-period kthreads in cases where RCU seems reluctant to start a new grace period. Signed-off-by: Paul E. 
McKenney Reviewed-by: Josh Triplett --- include/linux/rcutiny.h | 4 ++++ include/linux/rcutree.h | 1 + kernel/rcu/rcutorture.c | 1 + kernel/rcu/tree.c | 17 +++++++++++++++++ kernel/rcu/tree.h | 8 +++++++- 5 files changed, 30 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 425c659d54e5..d40a6a451330 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -119,6 +119,10 @@ static inline void rcu_sched_force_quiescent_state(void) { } +static inline void show_rcu_gp_kthreads(void) +{ +} + static inline void rcu_cpu_stall_reset(void) { } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index a59ca05fd4e3..3e2f5d432743 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -84,6 +84,7 @@ extern unsigned long rcutorture_vernum; long rcu_batches_completed(void); long rcu_batches_completed_bh(void); long rcu_batches_completed_sched(void); +void show_rcu_gp_kthreads(void); void rcu_force_quiescent_state(void); void rcu_bh_force_quiescent_state(void); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 9decce0f110c..37ae5e1d4a1d 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1034,6 +1034,7 @@ rcu_torture_printk(char *page) "??? Writer stall state %d g%lu c%lu f%#x\n", rcu_torture_writer_state, gpnum, completed, flags); + show_rcu_gp_kthreads(); rcutorture_trace_dump(); } rtcv_snap = rcu_torture_current_version; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3d15b5a82ae8..93e64381aa2a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -279,6 +279,21 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +/* + * Show the state of the grace-period kthreads. + */ +void show_rcu_gp_kthreads(void) +{ + struct rcu_state *rsp; + + for_each_rcu_flavor(rsp) { + pr_info("%s: wait state: %d ->state: %#lx\n", + rsp->name, rsp->gp_state, rsp->gp_kthread->state); + /* sched_show_task(rsp->gp_kthread); */ + } +} +EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); + /* * Record the number of times rcutorture tests have been initiated and * terminated. This information allows the debugfs tracing stats to be @@ -1626,6 +1641,7 @@ static int __noreturn rcu_gp_kthread(void *arg) trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), TPS("reqwait")); + rsp->gp_state = RCU_GP_WAIT_GPS; wait_event_interruptible(rsp->gp_wq, ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT); @@ -1653,6 +1669,7 @@ static int __noreturn rcu_gp_kthread(void *arg) trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), TPS("fqswait")); + rsp->gp_state = RCU_GP_WAIT_FQS; ret = wait_event_interruptible_timeout(rsp->gp_wq, ((gf = ACCESS_ONCE(rsp->gp_flags)) & RCU_GP_FLAG_FQS) || diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 75dc3c39a02a..c2fd1e722879 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -406,7 +406,8 @@ struct rcu_state { unsigned long completed; /* # of last completed gp. */ struct task_struct *gp_kthread; /* Task for grace periods. */ wait_queue_head_t gp_wq; /* Where GP task waits. */ - int gp_flags; /* Commands for GP task. */ + short gp_flags; /* Commands for GP task. */ + short gp_state; /* GP kthread sleep state. */ /* End of fields guarded by root rcu_node's lock. */ @@ -469,6 +470,11 @@ struct rcu_state { #define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */ #define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */ +/* Values for rcu_state structure's gp_flags field. 
*/ +#define RCU_GP_WAIT_INIT 0 /* Initial state. */ +#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */ +#define RCU_GP_WAIT_FQS 2 /* Wait for force-quiescent-state time. */ + extern struct list_head rcu_struct_flavors; /* Sequence through rcu_state structures for each RCU flavor. */ -- cgit v1.2.3 From 0e980234c97f98be6619b9281d83777f725b94ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 16 Apr 2014 10:07:09 -0700 Subject: percpu: Fix raw_cpu_inc_return() The definition for raw_cpu_add_return() uses the operation prefix "raw_add_return_", but the definitions in the various percpu.h files expect "raw_cpu_add_return_". This commit therefore appropriately adjusts the definition of raw_cpu_add_return(). Signed-off-by: Paul E. McKenney Acked-by: Christoph Lameter Reviewed-by: Josh Triplett --- include/linux/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index e7a0b95ed527..495c6543a8f2 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -639,7 +639,7 @@ do { \ # define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) # endif # define raw_cpu_add_return(pcp, val) \ - __pcpu_size_call_return2(raw_add_return_, pcp, val) + __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) #endif #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) -- cgit v1.2.3 From ac1bea85781e9004da9b3e8a4b097c18492d857c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 16 Mar 2014 21:36:25 -0700 Subject: sched,rcu: Make cond_resched() report RCU quiescent states Given a CPU running a loop containing cond_resched(), with no other tasks runnable on that CPU, RCU will eventually report RCU CPU stall warnings due to lack of quiescent states. Fortunately, every call to cond_resched() is a perfectly good quiescent state. Unfortunately, invoking rcu_note_context_switch() is a bit heavyweight for cond_resched(), especially given the need to disable preemption, and, for RCU-preempt, interrupts as well. This commit therefore maintains a per-CPU counter that causes cond_resched(), cond_resched_lock(), and cond_resched_softirq() to call rcu_note_context_switch(), but only about once per 256 invocations. This ratio was chosen in keeping with the relative time constants of RCU grace periods. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 36 ++++++++++++++++++++++++++++++++++++ kernel/rcu/update.c | 18 ++++++++++++++++++ kernel/sched/core.c | 7 ++++++- 3 files changed, 60 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 82973738125b..97cc8d6679b4 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -44,6 +44,7 @@ #include #include #include +#include #include extern int rcu_expedited; /* for sysctl */ @@ -286,6 +287,41 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, bool __rcu_is_watching(void); #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ +/* + * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings. + */ + +#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */ +DECLARE_PER_CPU(int, rcu_cond_resched_count); +void rcu_resched(void); + +/* + * Is it time to report RCU quiescent states? + * + * Note unsynchronized access to rcu_cond_resched_count. 
Yes, we might + * increment some random CPU's count, and possibly also load the result from + * yet another CPU's count. We might even clobber some other CPU's attempt + * to zero its counter. This is all OK because the goal is not precision, + * but rather reasonable amortization of rcu_note_context_switch() overhead + * and extremely high probability of avoiding RCU CPU stall warnings. + * Note that this function has to be preempted in just the wrong place, + * many thousands of times in a row, for anything bad to happen. + */ +static inline bool rcu_should_resched(void) +{ + return raw_cpu_inc_return(rcu_cond_resched_count) >= + RCU_COND_RESCHED_LIM; +} + +/* + * Report quiscent states to RCU if it is time to do so. + */ +static inline void rcu_cond_resched(void) +{ + if (unlikely(rcu_should_resched())) + rcu_resched(); +} + /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 4c0a9b0af469..ed7a0d72562c 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -338,3 +338,21 @@ static int __init check_cpu_stall_init(void) early_initcall(check_cpu_stall_init); #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ + +/* + * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings. + */ + +DEFINE_PER_CPU(int, rcu_cond_resched_count); + +/* + * Report a set of RCU quiescent states, for use by cond_resched() + * and friends. Out of line due to being called infrequently. + */ +void rcu_resched(void) +{ + preempt_disable(); + __this_cpu_write(rcu_cond_resched_count, 0); + rcu_note_context_switch(smp_processor_id()); + preempt_enable(); +} diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 268a45ea238c..9f530c9ed911 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4051,6 +4051,7 @@ static void __cond_resched(void) int __sched _cond_resched(void) { + rcu_cond_resched(); if (should_resched()) { __cond_resched(); return 1; @@ -4069,15 +4070,18 @@ EXPORT_SYMBOL(_cond_resched); */ int __cond_resched_lock(spinlock_t *lock) { + bool need_rcu_resched = rcu_should_resched(); int resched = should_resched(); int ret = 0; lockdep_assert_held(lock); - if (spin_needbreak(lock) || resched) { + if (spin_needbreak(lock) || resched || need_rcu_resched) { spin_unlock(lock); if (resched) __cond_resched(); + else if (unlikely(need_rcu_resched)) + rcu_resched(); else cpu_relax(); ret = 1; @@ -4091,6 +4095,7 @@ int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); + rcu_cond_resched(); /* BH disabled OK, just recording QSes. */ if (should_resched()) { local_bh_enable(); __cond_resched(); -- cgit v1.2.3 From 5228084eed8d54c426c7abde3be66daf8e1b0e57 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 7 Apr 2014 09:14:11 -0700 Subject: torture: Check for multiple concurrent torture tests The torture tests are designed to run in isolation, but do not enforce this isolation. This commit therefore checks for concurrent torture tests, and refuses to start new tests while old tests are running. Signed-off-by: Paul E. 
McKenney Reviewed-by: Josh Triplett --- include/linux/torture.h | 2 +- kernel/locking/locktorture.c | 3 ++- kernel/rcu/rcutorture.c | 3 ++- kernel/torture.c | 13 +++++++++++-- 4 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/torture.h b/include/linux/torture.h index b2e2b468e511..f998574247fd 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -81,7 +81,7 @@ void stutter_wait(const char *title); int torture_stutter_init(int s); /* Initialization and cleanup. */ -void torture_init_begin(char *ttype, bool v, int *runnable); +bool torture_init_begin(char *ttype, bool v, int *runnable); void torture_init_end(void); bool torture_cleanup(void); bool torture_must_stop(void); diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 1952466c7db5..dbafeac18e4d 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -355,7 +355,8 @@ static int __init lock_torture_init(void) &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops, }; - torture_init_begin(torture_type, verbose, &locktorture_runnable); + if (!torture_init_begin(torture_type, verbose, &locktorture_runnable)) + return -EBUSY; /* Process args and tell the world that the torturer is on the job. */ for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 4b7b97ff1195..7fa34f86e5ba 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1536,7 +1536,8 @@ rcu_torture_init(void) &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops, }; - torture_init_begin(torture_type, verbose, &rcutorture_runnable); + if (!torture_init_begin(torture_type, verbose, &rcutorture_runnable)) + return -EBUSY; /* Process args and tell the world that the torturer is on the job. */ for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { diff --git a/kernel/torture.c b/kernel/torture.c index ae1723a4c751..0ed0b49d2ce1 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -599,14 +599,20 @@ static void torture_stutter_cleanup(void) * The runnable parameter points to a flag that controls whether or not * the test is currently runnable. If there is no such flag, pass in NULL. */ -void __init torture_init_begin(char *ttype, bool v, int *runnable) +bool __init torture_init_begin(char *ttype, bool v, int *runnable) { mutex_lock(&fullstop_mutex); + if (torture_type != NULL) { + pr_alert("torture_init_begin: refusing %s init: %s running", + ttype, torture_type); + mutex_unlock(&fullstop_mutex); + return false; + } torture_type = ttype; verbose = v; torture_runnable = runnable; fullstop = FULLSTOP_DONTSTOP; - + return true; } EXPORT_SYMBOL_GPL(torture_init_begin); @@ -645,6 +651,9 @@ bool torture_cleanup(void) torture_shuffle_cleanup(); torture_stutter_cleanup(); torture_onoff_cleanup(); + mutex_lock(&fullstop_mutex); + torture_type = NULL; + mutex_unlock(&fullstop_mutex); return false; } EXPORT_SYMBOL_GPL(torture_cleanup); -- cgit v1.2.3 From 6348675c4e3612e001860354fea78258e041d9a1 Mon Sep 17 00:00:00 2001 From: Pranith Kumar Date: Wed, 16 Apr 2014 16:46:01 -0400 Subject: torture: Remove unused definition The torture_parm() macro is the same as torture_param(), and torture_parm() is not used. This commit therefore removes torture_parm(). Signed-off-by: Pranith Kumar Signed-off-by: Paul E. 
McKenney Reviewed-by: Josh Triplett --- include/linux/torture.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/torture.h b/include/linux/torture.h index f998574247fd..5ca58fcbaf1b 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -49,12 +49,6 @@ #define VERBOSE_TOROUT_ERRSTRING(s) \ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) -/* Definitions for a non-string torture-test module parameter. */ -#define torture_parm(type, name, init, msg) \ - static type name = init; \ - module_param(name, type, 0444); \ - MODULE_PARM_DESC(name, msg); - /* Definitions for online/offline exerciser. */ int torture_onoff_init(long ooholdoff, long oointerval); char *torture_onoff_stats(char *page); -- cgit v1.2.3 From a97181adf1502128e2945b4fef2591249c565467 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 12 May 2014 14:04:47 +0200 Subject: clk: sunxi: Fixup clk_sunxi_mmc_phase_control to take a clk rather then a hw_clk __clk_get_hw is supposed to be used by clk providers, not clk consumers. Signed-off-by: Hans de Goede Reviewed-by: Ulf Hansson Signed-off-by: Mike Turquette --- drivers/clk/sunxi/clk-sunxi.c | 3 ++- include/linux/clk/sunxi.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 59f90401b900..4cc2b2a5aa75 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -510,11 +510,12 @@ CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk", * clk_sunxi_mmc_phase_control() - configures MMC clock phase control */ -void clk_sunxi_mmc_phase_control(struct clk_hw *hw, u8 sample, u8 output) +void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output) { #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw) #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw) + struct clk_hw *hw = __clk_get_hw(clk); struct clk_composite *composite = to_clk_composite(hw); struct clk_hw *rate_hw = composite->rate_hw; struct clk_factors *factors = to_clk_factors(rate_hw); diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h index 1ef5c899e458..aed28c4451d9 100644 --- a/include/linux/clk/sunxi.h +++ b/include/linux/clk/sunxi.h @@ -17,6 +17,6 @@ #include -void clk_sunxi_mmc_phase_control(struct clk_hw *hw, u8 sample, u8 output); +void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output); #endif -- cgit v1.2.3 From c6e126de43e7d4abfd6cf796b40589db3a046167 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Thu, 15 May 2014 16:55:24 +0100 Subject: of: Keep track of populated platform devices In "Device Tree powered" systems, platform devices are usually massively populated with of_platform_populate() call, executed at some level of initcalls, either by generic architecture or by platform-specific code. There are situations though where certain devices must be created (and bound with drivers) before all the others. This presents a challenge, as devices created explicitly would be created again by of_platform_populate(). This patch tries to solve that issue in a generic way, adding a "populated" flag for a DT node description. Subsequent of_platform_populate() will skip such nodes (and its children) in a similar way to the non-available ones. This patch also adds of_platform_depopulate() as an operation complementary to the _populate() one. 
It removes a platform or an amba device populated from the Device Tree, together with its all children (leaving, however, devices without associated of_node untouched) clearing the "populated" flag on the way. Signed-off-by: Pawel Moll Reviewed-by: Rob Herring Acked-by: Grant Likely --- drivers/of/platform.c | 74 ++++++++++++++++++++++++++++++++++++++++++--- include/linux/of.h | 7 +++++ include/linux/of_platform.h | 5 +++ 3 files changed, 81 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/platform.c b/drivers/of/platform.c index bd47fbc53dc9..e8376d646d98 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -206,12 +206,13 @@ static struct platform_device *of_platform_device_create_pdata( { struct platform_device *dev; - if (!of_device_is_available(np)) + if (!of_device_is_available(np) || + of_node_test_and_set_flag(np, OF_POPULATED)) return NULL; dev = of_device_alloc(np, bus_id, parent); if (!dev) - return NULL; + goto err_clear_flag; #if defined(CONFIG_MICROBLAZE) dev->archdata.dma_mask = 0xffffffffUL; @@ -229,10 +230,14 @@ static struct platform_device *of_platform_device_create_pdata( if (of_device_add(dev) != 0) { platform_device_put(dev); - return NULL; + goto err_clear_flag; } return dev; + +err_clear_flag: + of_node_clear_flag(np, OF_POPULATED); + return NULL; } /** @@ -264,14 +269,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node, pr_debug("Creating amba device %s\n", node->full_name); - if (!of_device_is_available(node)) + if (!of_device_is_available(node) || + of_node_test_and_set_flag(node, OF_POPULATED)) return NULL; dev = amba_device_alloc(NULL, 0, 0); if (!dev) { pr_err("%s(): amba_device_alloc() failed for %s\n", __func__, node->full_name); - return NULL; + goto err_clear_flag; } /* setup generic device info */ @@ -311,6 +317,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node, err_free: amba_device_put(dev); +err_clear_flag: + of_node_clear_flag(node, OF_POPULATED); return NULL; } #else /* CONFIG_ARM_AMBA */ @@ -487,4 +495,60 @@ int of_platform_populate(struct device_node *root, return rc; } EXPORT_SYMBOL_GPL(of_platform_populate); + +static int of_platform_device_destroy(struct device *dev, void *data) +{ + bool *children_left = data; + + /* Do not touch devices not populated from the device tree */ + if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) { + *children_left = true; + return 0; + } + + /* Recurse, but don't touch this device if it has any children left */ + if (of_platform_depopulate(dev) != 0) { + *children_left = true; + return 0; + } + + if (dev->bus == &platform_bus_type) + platform_device_unregister(to_platform_device(dev)); +#ifdef CONFIG_ARM_AMBA + else if (dev->bus == &amba_bustype) + amba_device_unregister(to_amba_device(dev)); +#endif + else { + *children_left = true; + return 0; + } + + of_node_clear_flag(dev->of_node, OF_POPULATED); + + return 0; +} + +/** + * of_platform_depopulate() - Remove devices populated from device tree + * @parent: device which childred will be removed + * + * Complementary to of_platform_populate(), this function removes children + * of the given device (and, recurrently, their children) that have been + * created from their respective device tree nodes (and only those, + * leaving others - eg. manually created - unharmed). + * + * Returns 0 when all children devices have been removed or + * -EBUSY when some children remained. 
+ */ +int of_platform_depopulate(struct device *parent) +{ + bool children_left = false; + + device_for_each_child(parent, &children_left, + of_platform_device_destroy); + + return children_left ? -EBUSY : 0; +} +EXPORT_SYMBOL_GPL(of_platform_depopulate); + #endif /* CONFIG_OF_ADDRESS */ diff --git a/include/linux/of.h b/include/linux/of.h index 3bad8d106e0e..4c50d0b78b89 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -130,6 +130,12 @@ static inline int of_node_check_flag(struct device_node *n, unsigned long flag) return test_bit(flag, &n->_flags); } +static inline int of_node_test_and_set_flag(struct device_node *n, + unsigned long flag) +{ + return test_and_set_bit(flag, &n->_flags); +} + static inline void of_node_set_flag(struct device_node *n, unsigned long flag) { set_bit(flag, &n->_flags); @@ -197,6 +203,7 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) /* flag descriptions */ #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ #define OF_DETACHED 2 /* node has been detached from the device tree */ +#define OF_POPULATED 3 /* device already created for the node */ #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 05cb4a928252..b1010eeaac0d 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -72,6 +72,7 @@ extern int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent); +extern int of_platform_depopulate(struct device *parent); #else static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, @@ -80,6 +81,10 @@ static inline int of_platform_populate(struct device_node *root, { return -ENODEV; } +static inline int of_platform_depopulate(struct device *parent) +{ + return -ENODEV; +} #endif #endif /* _LINUX_OF_PLATFORM_H */ -- cgit v1.2.3 From 3b9334ac835bb431e2186645230c9f1eb94b5d49 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Wed, 30 Apr 2014 16:46:29 +0100 Subject: mfd: vexpress: Convert custom func API to regmap Components of the Versatile Express platform (configuration microcontrollers on motherboard and daughterboards in particular) talk to each other over a custom configuration bus. They provide miscellaneous functions (from clock generator control to energy sensors) which are represented as platform devices (and Device Tree nodes). The transactions on the bus can be generated by different "bridges" in the system, some of which are universal for the whole platform (for the price of high transfer latencies), others restricted to a subsystem (but much faster). Until now drivers for such functions were using custom "func" API, which is being replaced in this patch by regmap calls. 
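From a consumer driver's point of view the change boils down to the following (a minimal sketch condensed from the hwmon hunks further down; the probe function name is hypothetical and error handling is trimmed):

static int example_probe(struct platform_device *pdev)
{
	struct regmap *reg;
	u32 value;
	int err;

	/*
	 * Replaces vexpress_config_func_get_by_dev(); the regmap is
	 * devres-managed, so there is no counterpart to
	 * vexpress_config_func_put() in the error or remove paths.
	 */
	reg = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* Replaces vexpress_config_read()/vexpress_config_write(). */
	err = regmap_read(reg, 0, &value);
	if (err)
		return err;

	return regmap_write(reg, 0, value);
}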
This required: * a rework (and move to drivers/bus directory, as suggested by Samuel and Arnd) of the config bus core, which is much simpler now and uses device model infrastructure (class) to keep track of the bridges; non-DT case (soon to be retired anyway) is simply covered by a special device registration function * the new config-bus driver also takes over device population, so there is no need for special matching table for of_platform_populate nor "simple-bus" hack in the arm64 model dtsi file (relevant bindings documentation has been updated); this allows all the vexpress devices fit into normal device model, making it possible to remove plenty of early inits and other hacks in the near future * adaptation of the syscfg bridge implementation in the sysreg driver, again making it much simpler; there is a special case of the "energy" function spanning two registers, where they should be both defined in the tree now, but backward compatibility is maintained in the code * modification of the relevant drivers: * hwmon - just a straight-forward API change * power/reset driver - API change * regulator - API change plus error handling simplification * osc clock driver - this one required larger rework in order to turn in into a standard platform driver Signed-off-by: Pawel Moll Acked-by: Mark Brown Acked-by: Lee Jones Acked-by: Guenter Roeck Acked-by: Mike Turquette --- .../devicetree/bindings/arm/vexpress-sysreg.txt | 43 ++- Documentation/devicetree/bindings/arm/vexpress.txt | 15 +- arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 5 +- arch/arm/mach-vexpress/ct-ca9x4.c | 10 +- arch/arm/mach-vexpress/v2m.c | 18 +- arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 2 +- drivers/bus/Kconfig | 9 + drivers/bus/Makefile | 2 + drivers/bus/vexpress-config.c | 202 +++++++++++ drivers/clk/versatile/clk-vexpress-osc.c | 96 +++-- drivers/hwmon/vexpress.c | 17 +- drivers/mfd/Makefile | 2 +- drivers/mfd/vexpress-config.c | 287 --------------- drivers/mfd/vexpress-sysreg.c | 395 +++++++++++---------- drivers/power/reset/vexpress-poweroff.c | 16 +- drivers/regulator/vexpress.c | 50 +-- include/linux/vexpress.h | 79 +---- 17 files changed, 568 insertions(+), 680 deletions(-) create mode 100644 drivers/bus/vexpress-config.c delete mode 100644 drivers/mfd/vexpress-config.c (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt index 5580e9c4bd85..57b423f78995 100644 --- a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt +++ b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt @@ -27,24 +27,45 @@ Example: This block also can also act a bridge to the platform's configuration bus via "system control" interface, addressing devices with site number, position in the board stack, config controller, function and device -numbers - see motherboard's TRM for more details. - -The node describing a config device must refer to the sysreg node via -"arm,vexpress,config-bridge" phandle (can be also defined in the node's -parent) and relies on the board topology properties - see main vexpress -node documentation for more details. It must also define the following -property: -- arm,vexpress-sysreg,func : must contain two cells: - - first cell defines function number (eg. 1 for clock generator, - 2 for voltage regulators etc.) - - device number (eg. osc 0, osc 1 etc.) +numbers - see motherboard's TRM for more details. 
All configuration +controller accessible via this interface must reference the sysreg +node via "arm,vexpress,config-bridge" phandle and define appropriate +topology properties - see main vexpress node documentation for more +details. Each child of such node describes one function and must +define the following properties: +- compatible value : must be one of (corresponding to the TRM): + "arm,vexpress-amp" + "arm,vexpress-dvimode" + "arm,vexpress-energy" + "arm,vexpress-muxfpga" + "arm,vexpress-osc" + "arm,vexpress-power" + "arm,vexpress-reboot" + "arm,vexpress-reset" + "arm,vexpress-scc" + "arm,vexpress-shutdown" + "arm,vexpress-temp" + "arm,vexpress-volt" +- arm,vexpress-sysreg,func : must contain a set of two cells long groups: + - first cell of each group defines the function number + (eg. 1 for clock generator, 2 for voltage regulators etc.) + - second cell of each group defines device number (eg. osc 0, + osc 1 etc.) + - some functions (eg. energy meter, with its 64 bit long counter) + are using more than one function/device number pair Example: mcc { + compatible = "arm,vexpress,config-bus"; arm,vexpress,config-bridge = <&v2m_sysreg>; osc@0 { compatible = "arm,vexpress-osc"; arm,vexpress-sysreg,func = <1 0>; }; + + energy@0 { + compatible = "arm,vexpress-energy"; + arm,vexpress-sysreg,func = <13 0>, <13 1>; + }; }; diff --git a/Documentation/devicetree/bindings/arm/vexpress.txt b/Documentation/devicetree/bindings/arm/vexpress.txt index ae49161e478a..39844cd0bcce 100644 --- a/Documentation/devicetree/bindings/arm/vexpress.txt +++ b/Documentation/devicetree/bindings/arm/vexpress.txt @@ -80,12 +80,17 @@ but also control clock generators, voltage regulators, gather environmental data like temperature, power consumption etc. Even the video output switch (FPGA) is controlled that way. -Nodes describing devices controlled by this infrastructure should -point at the bridge device node: +The controllers are not mapped into normal memory address space +and must be accessed through bridges - other devices capable +of generating transactions on the configuration bus. + +The nodes describing configuration controllers must define +the following properties: +- compatible value: + compatible = "arm,vexpress,config-bus"; - bridge phandle: arm,vexpress,config-bridge = ; -This property can be also defined in a parent node (eg. for a DCC) -and is effective for all children. +and children describing available functions. 
Platform topology @@ -197,7 +202,7 @@ Example of a VE tile description (simplified) }; dcc { - compatible = "simple-bus"; + compatible = "arm,vexpress,config-bus"; arm,vexpress,config-bridge = <&v2m_sysreg>; osc@0 { diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 15f98cbcb75a..a25c262326dc 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -312,6 +312,7 @@ arm,vexpress-sysreg,func = <12 0>; label = "A15 Pcore"; }; + power@1 { /* Total power for the three A7 cores */ compatible = "arm,vexpress-power"; @@ -322,14 +323,14 @@ energy@0 { /* Total energy for the two A15 cores */ compatible = "arm,vexpress-energy"; - arm,vexpress-sysreg,func = <13 0>; + arm,vexpress-sysreg,func = <13 0>, <13 1>; label = "A15 Jcore"; }; energy@2 { /* Total energy for the three A7 cores */ compatible = "arm,vexpress-energy"; - arm,vexpress-sysreg,func = <13 2>; + arm,vexpress-sysreg,func = <13 2>, <13 3>; label = "A7 Jcore"; }; }; diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 6f34497a4245..35e394aa00e5 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -128,6 +128,10 @@ static struct platform_device pmu_device = { .resource = pmu_resources, }; +static struct clk_lookup osc1_lookup = { + .dev_id = "ct:clcd", +}; + static struct platform_device osc1_device = { .name = "vexpress-osc", .id = 1, @@ -135,6 +139,7 @@ static struct platform_device osc1_device = { .resource = (struct resource []) { VEXPRESS_RES_FUNC(0xf, 1), }, + .dev.platform_data = &osc1_lookup, }; static void __init ct_ca9x4_init(void) @@ -155,10 +160,7 @@ static void __init ct_ca9x4_init(void) amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); platform_device_register(&pmu_device); - platform_device_register(&osc1_device); - - WARN_ON(clk_register_clkdev(vexpress_osc_setup(&osc1_device.dev), - NULL, "ct:clcd")); + vexpress_sysreg_config_device_register(&osc1_device); } #ifdef CONFIG_SMP diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index 4f8b8cb17ff5..ac95220a5019 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c @@ -340,11 +340,6 @@ static void __init v2m_init(void) regulator_register_fixed(0, v2m_eth_supplies, ARRAY_SIZE(v2m_eth_supplies)); - platform_device_register(&v2m_muxfpga_device); - platform_device_register(&v2m_shutdown_device); - platform_device_register(&v2m_reboot_device); - platform_device_register(&v2m_dvimode_device); - platform_device_register(&v2m_sysreg_device); platform_device_register(&v2m_pcie_i2c_device); platform_device_register(&v2m_ddc_i2c_device); @@ -356,6 +351,11 @@ static void __init v2m_init(void) for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++) amba_device_register(v2m_amba_devs[i], &iomem_resource); + vexpress_sysreg_config_device_register(&v2m_muxfpga_device); + vexpress_sysreg_config_device_register(&v2m_shutdown_device); + vexpress_sysreg_config_device_register(&v2m_reboot_device); + vexpress_sysreg_config_device_register(&v2m_dvimode_device); + ct_desc->init_tile(); } @@ -423,17 +423,11 @@ void __init v2m_dt_init_early(void) versatile_sched_clock_init(vexpress_get_24mhz_clock_base(), 24000000); } -static const struct of_device_id v2m_dt_bus_match[] __initconst = { - { .compatible = "simple-bus", }, - { .compatible = "arm,amba-bus", }, - { .compatible = "arm,vexpress,config-bus", }, - {} -}; static void __init v2m_dt_init(void) { l2x0_of_init(0x00400000, 
0xfe0fffff); - of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } static const char * const v2m_dt_match[] __initconst = { diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi index 2f2ecd217363..ac2cb2418025 100644 --- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi @@ -200,7 +200,7 @@ }; mcc { - compatible = "arm,vexpress,config-bus", "simple-bus"; + compatible = "arm,vexpress,config-bus"; arm,vexpress,config-bridge = <&v2m_sysreg>; v2m_oscclk1: osc@1 { diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 552373c4e362..f24e79dd51bf 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -41,4 +41,13 @@ config ARM_CCI help Driver supporting the CCI cache coherent interconnect for ARM platforms. + +config VEXPRESS_CONFIG + bool "Versatile Express configuration bus" + default y if ARCH_VEXPRESS + depends on ARM || ARM64 + select REGMAP + help + Platform configuration infrastructure for the ARM Ltd. + Versatile Express. endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 8947bdd0de8b..f095aa771de9 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o # CCI cache coherent interconnect for ARM platforms obj-$(CONFIG_ARM_CCI) += arm-cci.o + +obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c new file mode 100644 index 000000000000..27a07dfcd626 --- /dev/null +++ b/drivers/bus/vexpress-config.c @@ -0,0 +1,202 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014 ARM Limited + */ + +#include +#include +#include +#include +#include + + +struct vexpress_config_bridge { + struct vexpress_config_bridge_ops *ops; + void *context; +}; + + +static DEFINE_MUTEX(vexpress_config_mutex); +static struct class *vexpress_config_class; +static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER; + + +void vexpress_config_set_master(u32 site) +{ + vexpress_config_site_master = site; +} + +u32 vexpress_config_get_master(void) +{ + return vexpress_config_site_master; +} + +void vexpress_config_lock(void *arg) +{ + mutex_lock(&vexpress_config_mutex); +} + +void vexpress_config_unlock(void *arg) +{ + mutex_unlock(&vexpress_config_mutex); +} + + +static void vexpress_config_find_prop(struct device_node *node, + const char *name, u32 *val) +{ + /* Default value */ + *val = 0; + + of_node_get(node); + while (node) { + if (of_property_read_u32(node, name, val) == 0) { + of_node_put(node); + return; + } + node = of_get_next_parent(node); + } +} + +int vexpress_config_get_topo(struct device_node *node, u32 *site, + u32 *position, u32 *dcc) +{ + vexpress_config_find_prop(node, "arm,vexpress,site", site); + if (*site == VEXPRESS_SITE_MASTER) + *site = vexpress_config_site_master; + if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER)) + return -EINVAL; + vexpress_config_find_prop(node, "arm,vexpress,position", position); + vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc); + + return 0; +} + + +static void vexpress_config_devres_release(struct device *dev, void *res) +{ + struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent); + struct regmap *regmap = res; + + bridge->ops->regmap_exit(regmap, bridge->context); +} + +struct regmap *devm_regmap_init_vexpress_config(struct device *dev) +{ + struct vexpress_config_bridge *bridge; + struct regmap *regmap; + struct regmap **res; + + if (WARN_ON(dev->parent->class != vexpress_config_class)) + return ERR_PTR(-ENODEV); + + bridge = dev_get_drvdata(dev->parent); + if (WARN_ON(!bridge)) + return ERR_PTR(-EINVAL); + + res = devres_alloc(vexpress_config_devres_release, sizeof(*res), + GFP_KERNEL); + if (!res) + return ERR_PTR(-ENOMEM); + + regmap = bridge->ops->regmap_init(dev, bridge->context); + if (IS_ERR(regmap)) { + devres_free(res); + return regmap; + } + + *res = regmap; + devres_add(dev, res); + + return regmap; +} + + +struct device *vexpress_config_bridge_register(struct device *parent, + struct vexpress_config_bridge_ops *ops, void *context) +{ + struct device *dev; + struct vexpress_config_bridge *bridge; + + if (!vexpress_config_class) { + vexpress_config_class = class_create(THIS_MODULE, + "vexpress-config"); + if (IS_ERR(vexpress_config_class)) + return (void *)vexpress_config_class; + } + + dev = device_create(vexpress_config_class, parent, 0, + NULL, "%s.bridge", dev_name(parent)); + + if (IS_ERR(dev)) + return dev; + + bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL); + if (!bridge) { + put_device(dev); + device_unregister(dev); + return ERR_PTR(-ENOMEM); + } + bridge->ops = ops; + bridge->context = context; + + dev_set_drvdata(dev, bridge); + + dev_dbg(parent, "Registered bridge '%s', parent node %p\n", + dev_name(dev), parent->of_node); + + return dev; +} + + +static int vexpress_config_node_match(struct device *dev, const void *data) +{ + const struct device_node *node = data; + + dev_dbg(dev, "Parent node %p, looking for %p\n", + dev->parent->of_node, node); + + return dev->parent->of_node == node; +} + +static int vexpress_config_populate(struct 
device_node *node) +{ + struct device_node *bridge; + struct device *parent; + + bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); + if (!bridge) + return -EINVAL; + + parent = class_find_device(vexpress_config_class, NULL, bridge, + vexpress_config_node_match); + if (WARN_ON(!parent)) + return -ENODEV; + + return of_platform_populate(node, NULL, NULL, parent); +} + +static int __init vexpress_config_init(void) +{ + int err = 0; + struct device_node *node; + + /* Need the config devices early, before the "normal" devices... */ + for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { + err = vexpress_config_populate(node); + if (err) + break; + } + + return err; +} +postcore_initcall(vexpress_config_init); + diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c index 422391242b39..529a59c0fbfa 100644 --- a/drivers/clk/versatile/clk-vexpress-osc.c +++ b/drivers/clk/versatile/clk-vexpress-osc.c @@ -11,8 +11,6 @@ * Copyright (C) 2012 ARM Limited */ -#define pr_fmt(fmt) "vexpress-osc: " fmt - #include #include #include @@ -22,7 +20,7 @@ #include struct vexpress_osc { - struct vexpress_config_func *func; + struct regmap *reg; struct clk_hw hw; unsigned long rate_min; unsigned long rate_max; @@ -36,7 +34,7 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw, struct vexpress_osc *osc = to_vexpress_osc(hw); u32 rate; - vexpress_config_read(osc->func, 0, &rate); + regmap_read(osc->reg, 0, &rate); return rate; } @@ -60,7 +58,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate, { struct vexpress_osc *osc = to_vexpress_osc(hw); - return vexpress_config_write(osc->func, 0, rate); + return regmap_write(osc->reg, 0, rate); } static struct clk_ops vexpress_osc_ops = { @@ -70,58 +68,31 @@ static struct clk_ops vexpress_osc_ops = { }; -struct clk * __init vexpress_osc_setup(struct device *dev) -{ - struct clk_init_data init; - struct vexpress_osc *osc = kzalloc(sizeof(*osc), GFP_KERNEL); - - if (!osc) - return NULL; - - osc->func = vexpress_config_func_get_by_dev(dev); - if (!osc->func) { - kfree(osc); - return NULL; - } - - init.name = dev_name(dev); - init.ops = &vexpress_osc_ops; - init.flags = CLK_IS_ROOT; - init.num_parents = 0; - osc->hw.init = &init; - - return clk_register(NULL, &osc->hw); -} - -void __init vexpress_osc_of_setup(struct device_node *node) +static int vexpress_osc_probe(struct platform_device *pdev) { + struct clk_lookup *cl = pdev->dev.platform_data; /* Non-DT lookup */ struct clk_init_data init; struct vexpress_osc *osc; struct clk *clk; u32 range[2]; - vexpress_sysreg_of_early_init(); - - osc = kzalloc(sizeof(*osc), GFP_KERNEL); + osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL); if (!osc) - return; + return -ENOMEM; - osc->func = vexpress_config_func_get_by_node(node); - if (!osc->func) { - pr_err("Failed to obtain config func for node '%s'!\n", - node->full_name); - goto error; - } + osc->reg = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(osc->reg)) + return PTR_ERR(osc->reg); - if (of_property_read_u32_array(node, "freq-range", range, + if (of_property_read_u32_array(pdev->dev.of_node, "freq-range", range, ARRAY_SIZE(range)) == 0) { osc->rate_min = range[0]; osc->rate_max = range[1]; } - of_property_read_string(node, "clock-output-names", &init.name); - if (!init.name) - init.name = node->full_name; + if (of_property_read_string(pdev->dev.of_node, "clock-output-names", + &init.name) != 0) + init.name = dev_name(&pdev->dev); init.ops = 
&vexpress_osc_ops; init.flags = CLK_IS_ROOT; @@ -130,20 +101,37 @@ void __init vexpress_osc_of_setup(struct device_node *node) osc->hw.init = &init; clk = clk_register(NULL, &osc->hw); - if (IS_ERR(clk)) { - pr_err("Failed to register clock '%s'!\n", init.name); - goto error; + if (IS_ERR(clk)) + return PTR_ERR(clk); + + of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk); + + /* Only happens for non-DT cases */ + if (cl) { + cl->clk = clk; + clkdev_add(cl); } - of_clk_add_provider(node, of_clk_src_simple_get, clk); + dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name); + + return 0; +} - pr_debug("Registered clock '%s'\n", init.name); +static struct of_device_id vexpress_osc_of_match[] = { + { .compatible = "arm,vexpress-osc", }, + {} +}; - return; +static struct platform_driver vexpress_osc_driver = { + .driver = { + .name = "vexpress-osc", + .of_match_table = vexpress_osc_of_match, + }, + .probe = vexpress_osc_probe, +}; -error: - if (osc->func) - vexpress_config_func_put(osc->func); - kfree(osc); +static int __init vexpress_osc_init(void) +{ + return platform_driver_register(&vexpress_osc_driver); } -CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup); +core_initcall(vexpress_osc_init); diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c index 8242b75d96c8..611f34c7333d 100644 --- a/drivers/hwmon/vexpress.c +++ b/drivers/hwmon/vexpress.c @@ -26,7 +26,7 @@ struct vexpress_hwmon_data { struct device *hwmon_dev; - struct vexpress_config_func *func; + struct regmap *reg; const char *name; }; @@ -53,7 +53,7 @@ static ssize_t vexpress_hwmon_u32_show(struct device *dev, int err; u32 value; - err = vexpress_config_read(data->func, 0, &value); + err = regmap_read(data->reg, 0, &value); if (err) return err; @@ -68,11 +68,11 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev, int err; u32 value_hi, value_lo; - err = vexpress_config_read(data->func, 0, &value_lo); + err = regmap_read(data->reg, 0, &value_lo); if (err) return err; - err = vexpress_config_read(data->func, 1, &value_hi); + err = regmap_read(data->reg, 1, &value_hi); if (err) return err; @@ -234,9 +234,9 @@ static int vexpress_hwmon_probe(struct platform_device *pdev) type = match->data; data->name = type->name; - data->func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!data->func) - return -ENODEV; + data->reg = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(data->reg)) + return PTR_ERR(data->reg); err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups); if (err) @@ -252,7 +252,6 @@ static int vexpress_hwmon_probe(struct platform_device *pdev) error: sysfs_remove_group(&pdev->dev.kobj, match->data); - vexpress_config_func_put(data->func); return err; } @@ -266,8 +265,6 @@ static int vexpress_hwmon_remove(struct platform_device *pdev) match = of_match_device(vexpress_hwmon_of_match, &pdev->dev); sysfs_remove_group(&pdev->dev.kobj, match->data); - vexpress_config_func_put(data->func); - return 0; } diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 2851275e2656..9ba838eb5131 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -161,7 +161,7 @@ obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o obj-$(CONFIG_MFD_SYSCON) += syscon.o obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o -obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o +obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-sysreg.o obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += 
as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c deleted file mode 100644 index d0db89d13e01..000000000000 --- a/drivers/mfd/vexpress-config.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2012 ARM Limited - */ - -#define pr_fmt(fmt) "vexpress-config: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#define VEXPRESS_CONFIG_MAX_BRIDGES 2 - -static struct vexpress_config_bridge { - struct device_node *node; - struct vexpress_config_bridge_info *info; - struct list_head transactions; - spinlock_t transactions_lock; -} vexpress_config_bridges[VEXPRESS_CONFIG_MAX_BRIDGES]; - -static DECLARE_BITMAP(vexpress_config_bridges_map, - ARRAY_SIZE(vexpress_config_bridges)); -static DEFINE_MUTEX(vexpress_config_bridges_mutex); - -struct vexpress_config_bridge *vexpress_config_bridge_register( - struct device_node *node, - struct vexpress_config_bridge_info *info) -{ - struct vexpress_config_bridge *bridge; - int i; - - pr_debug("Registering bridge '%s'\n", info->name); - - mutex_lock(&vexpress_config_bridges_mutex); - i = find_first_zero_bit(vexpress_config_bridges_map, - ARRAY_SIZE(vexpress_config_bridges)); - if (i >= ARRAY_SIZE(vexpress_config_bridges)) { - pr_err("Can't register more bridges!\n"); - mutex_unlock(&vexpress_config_bridges_mutex); - return NULL; - } - __set_bit(i, vexpress_config_bridges_map); - bridge = &vexpress_config_bridges[i]; - - bridge->node = node; - bridge->info = info; - INIT_LIST_HEAD(&bridge->transactions); - spin_lock_init(&bridge->transactions_lock); - - mutex_unlock(&vexpress_config_bridges_mutex); - - return bridge; -} -EXPORT_SYMBOL(vexpress_config_bridge_register); - -void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge) -{ - struct vexpress_config_bridge __bridge = *bridge; - int i; - - mutex_lock(&vexpress_config_bridges_mutex); - for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) - if (&vexpress_config_bridges[i] == bridge) - __clear_bit(i, vexpress_config_bridges_map); - mutex_unlock(&vexpress_config_bridges_mutex); - - WARN_ON(!list_empty(&__bridge.transactions)); - while (!list_empty(&__bridge.transactions)) - cpu_relax(); -} -EXPORT_SYMBOL(vexpress_config_bridge_unregister); - - -struct vexpress_config_func { - struct vexpress_config_bridge *bridge; - void *func; -}; - -struct vexpress_config_func *__vexpress_config_func_get(struct device *dev, - struct device_node *node) -{ - struct device_node *bridge_node; - struct vexpress_config_func *func; - int i; - - if (WARN_ON(dev && node && dev->of_node != node)) - return NULL; - if (dev && !node) - node = dev->of_node; - - func = kzalloc(sizeof(*func), GFP_KERNEL); - if (!func) - return NULL; - - bridge_node = of_node_get(node); - while (bridge_node) { - const __be32 *prop = of_get_property(bridge_node, - "arm,vexpress,config-bridge", NULL); - - if (prop) { - bridge_node = of_find_node_by_phandle( - be32_to_cpup(prop)); - break; - } - - bridge_node = of_get_next_parent(bridge_node); - } - - 
mutex_lock(&vexpress_config_bridges_mutex); - for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) { - struct vexpress_config_bridge *bridge = - &vexpress_config_bridges[i]; - - if (test_bit(i, vexpress_config_bridges_map) && - bridge->node == bridge_node) { - func->bridge = bridge; - func->func = bridge->info->func_get(dev, node); - break; - } - } - mutex_unlock(&vexpress_config_bridges_mutex); - - if (!func->func) { - of_node_put(node); - kfree(func); - return NULL; - } - - return func; -} -EXPORT_SYMBOL(__vexpress_config_func_get); - -void vexpress_config_func_put(struct vexpress_config_func *func) -{ - func->bridge->info->func_put(func->func); - of_node_put(func->bridge->node); - kfree(func); -} -EXPORT_SYMBOL(vexpress_config_func_put); - -struct vexpress_config_trans { - struct vexpress_config_func *func; - int offset; - bool write; - u32 *data; - int status; - struct completion completion; - struct list_head list; -}; - -static void vexpress_config_dump_trans(const char *what, - struct vexpress_config_trans *trans) -{ - pr_debug("%s %s trans %p func 0x%p offset %d data 0x%x status %d\n", - what, trans->write ? "write" : "read", trans, - trans->func->func, trans->offset, - trans->data ? *trans->data : 0, trans->status); -} - -static int vexpress_config_schedule(struct vexpress_config_trans *trans) -{ - int status; - struct vexpress_config_bridge *bridge = trans->func->bridge; - unsigned long flags; - - init_completion(&trans->completion); - trans->status = -EFAULT; - - spin_lock_irqsave(&bridge->transactions_lock, flags); - - if (list_empty(&bridge->transactions)) { - vexpress_config_dump_trans("Executing", trans); - status = bridge->info->func_exec(trans->func->func, - trans->offset, trans->write, trans->data); - } else { - vexpress_config_dump_trans("Queuing", trans); - status = VEXPRESS_CONFIG_STATUS_WAIT; - } - - switch (status) { - case VEXPRESS_CONFIG_STATUS_DONE: - vexpress_config_dump_trans("Finished", trans); - trans->status = status; - break; - case VEXPRESS_CONFIG_STATUS_WAIT: - list_add_tail(&trans->list, &bridge->transactions); - break; - } - - spin_unlock_irqrestore(&bridge->transactions_lock, flags); - - return status; -} - -void vexpress_config_complete(struct vexpress_config_bridge *bridge, - int status) -{ - struct vexpress_config_trans *trans; - unsigned long flags; - const char *message = "Completed"; - - spin_lock_irqsave(&bridge->transactions_lock, flags); - - trans = list_first_entry(&bridge->transactions, - struct vexpress_config_trans, list); - trans->status = status; - - do { - vexpress_config_dump_trans(message, trans); - list_del(&trans->list); - complete(&trans->completion); - - if (list_empty(&bridge->transactions)) - break; - - trans = list_first_entry(&bridge->transactions, - struct vexpress_config_trans, list); - vexpress_config_dump_trans("Executing pending", trans); - trans->status = bridge->info->func_exec(trans->func->func, - trans->offset, trans->write, trans->data); - message = "Finished pending"; - } while (trans->status == VEXPRESS_CONFIG_STATUS_DONE); - - spin_unlock_irqrestore(&bridge->transactions_lock, flags); -} -EXPORT_SYMBOL(vexpress_config_complete); - -int vexpress_config_wait(struct vexpress_config_trans *trans) -{ - wait_for_completion(&trans->completion); - - return trans->status; -} -EXPORT_SYMBOL(vexpress_config_wait); - -int vexpress_config_read(struct vexpress_config_func *func, int offset, - u32 *data) -{ - struct vexpress_config_trans trans = { - .func = func, - .offset = offset, - .write = false, - .data = data, - 
.status = 0, - }; - int status = vexpress_config_schedule(&trans); - - if (status == VEXPRESS_CONFIG_STATUS_WAIT) - status = vexpress_config_wait(&trans); - - return status; -} -EXPORT_SYMBOL(vexpress_config_read); - -int vexpress_config_write(struct vexpress_config_func *func, int offset, - u32 data) -{ - struct vexpress_config_trans trans = { - .func = func, - .offset = offset, - .write = true, - .data = &data, - .status = 0, - }; - int status = vexpress_config_schedule(&trans); - - if (status == VEXPRESS_CONFIG_STATUS_WAIT) - status = vexpress_config_wait(&trans); - - return status; -} -EXPORT_SYMBOL(vexpress_config_write); diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index 35281e804e7e..b4138a7168db 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -16,8 +16,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -72,9 +74,18 @@ static void __iomem *vexpress_sysreg_base; static struct device *vexpress_sysreg_dev; -static int vexpress_master_site; +static LIST_HEAD(vexpress_sysreg_config_funcs); +static struct device *vexpress_sysreg_config_bridge; +static int vexpress_sysreg_get_master(void) +{ + if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) + return VEXPRESS_SITE_DB2; + + return VEXPRESS_SITE_DB1; +} + void vexpress_flags_set(u32 data) { writel(~0, vexpress_sysreg_base + SYS_FLAGSCLR); @@ -84,7 +95,7 @@ void vexpress_flags_set(u32 data) u32 vexpress_get_procid(int site) { if (site == VEXPRESS_SITE_MASTER) - site = vexpress_master_site; + site = vexpress_sysreg_get_master(); return readl(vexpress_sysreg_base + (site == VEXPRESS_SITE_DB1 ? SYS_PROCID0 : SYS_PROCID1)); @@ -114,130 +125,33 @@ void __iomem *vexpress_get_24mhz_clock_base(void) } -static void vexpress_sysreg_find_prop(struct device_node *node, - const char *name, u32 *val) -{ - of_node_get(node); - while (node) { - if (of_property_read_u32(node, name, val) == 0) { - of_node_put(node); - return; - } - node = of_get_next_parent(node); - } -} - -unsigned __vexpress_get_site(struct device *dev, struct device_node *node) -{ - u32 site = 0; - - WARN_ON(dev && node && dev->of_node != node); - if (dev && !node) - node = dev->of_node; - - if (node) { - vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site); - } else if (dev && dev->bus == &platform_bus_type) { - struct platform_device *pdev = to_platform_device(dev); - - if (pdev->num_resources == 1 && - pdev->resource[0].flags == IORESOURCE_BUS) - site = pdev->resource[0].start; - } else if (dev && strncmp(dev_name(dev), "ct:", 3) == 0) { - site = VEXPRESS_SITE_MASTER; - } - - if (site == VEXPRESS_SITE_MASTER) - site = vexpress_master_site; - - return site; -} - - struct vexpress_sysreg_config_func { - u32 template; - u32 device; + struct list_head list; + struct regmap *regmap; + int num_templates; + u32 template[0]; /* Keep this last */ }; -static struct vexpress_config_bridge *vexpress_sysreg_config_bridge; -static struct timer_list vexpress_sysreg_config_timer; -static u32 *vexpress_sysreg_config_data; -static int vexpress_sysreg_config_tries; - -static void *vexpress_sysreg_config_func_get(struct device *dev, - struct device_node *node) +static int vexpress_sysreg_config_exec(struct vexpress_sysreg_config_func *func, + int index, bool write, u32 *data) { - struct vexpress_sysreg_config_func *config_func; - u32 site = 0; - u32 position = 0; - u32 dcc = 0; - u32 func_device[2]; - int err = -EFAULT; - - if (node) { - of_node_get(node); - 
vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site); - vexpress_sysreg_find_prop(node, "arm,vexpress,position", - &position); - vexpress_sysreg_find_prop(node, "arm,vexpress,dcc", &dcc); - err = of_property_read_u32_array(node, - "arm,vexpress-sysreg,func", func_device, - ARRAY_SIZE(func_device)); - of_node_put(node); - } else if (dev && dev->bus == &platform_bus_type) { - struct platform_device *pdev = to_platform_device(dev); - - if (pdev->num_resources == 1 && - pdev->resource[0].flags == IORESOURCE_BUS) { - site = pdev->resource[0].start; - func_device[0] = pdev->resource[0].end; - func_device[1] = pdev->id; - err = 0; - } - } - if (err) - return NULL; - - config_func = kzalloc(sizeof(*config_func), GFP_KERNEL); - if (!config_func) - return NULL; - - config_func->template = SYS_CFGCTRL_DCC(dcc); - config_func->template |= SYS_CFGCTRL_FUNC(func_device[0]); - config_func->template |= SYS_CFGCTRL_SITE(site == VEXPRESS_SITE_MASTER ? - vexpress_master_site : site); - config_func->template |= SYS_CFGCTRL_POSITION(position); - config_func->device |= func_device[1]; - - dev_dbg(vexpress_sysreg_dev, "func 0x%p = 0x%x, %d\n", config_func, - config_func->template, config_func->device); - - return config_func; -} - -static void vexpress_sysreg_config_func_put(void *func) -{ - kfree(func); -} - -static int vexpress_sysreg_config_func_exec(void *func, int offset, - bool write, u32 *data) -{ - int status; - struct vexpress_sysreg_config_func *config_func = func; - u32 command; + u32 command, status; + int tries; + long timeout; if (WARN_ON(!vexpress_sysreg_base)) return -ENOENT; + if (WARN_ON(index > func->num_templates)) + return -EINVAL; + command = readl(vexpress_sysreg_base + SYS_CFGCTRL); if (WARN_ON(command & SYS_CFGCTRL_START)) return -EBUSY; - command = SYS_CFGCTRL_START; + command = func->template[index]; + command |= SYS_CFGCTRL_START; command |= write ? SYS_CFGCTRL_WRITE : 0; - command |= config_func->template; - command |= SYS_CFGCTRL_DEVICE(config_func->device + offset); /* Use a canary for reads */ if (!write) @@ -250,90 +164,190 @@ static int vexpress_sysreg_config_func_exec(void *func, int offset, writel(command, vexpress_sysreg_base + SYS_CFGCTRL); mb(); - if (vexpress_sysreg_dev) { - /* Schedule completion check */ - if (!write) - vexpress_sysreg_config_data = data; - vexpress_sysreg_config_tries = 100; - mod_timer(&vexpress_sysreg_config_timer, - jiffies + usecs_to_jiffies(100)); - status = VEXPRESS_CONFIG_STATUS_WAIT; - } else { - /* Early execution, no timer available, have to spin */ - u32 cfgstat; + /* The operation can take ages... 
Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + + status = readl(vexpress_sysreg_base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout > 20) + timeout -= 20; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(vexpress_sysreg_base + SYS_CFGDATA); + dev_dbg(vexpress_sysreg_dev, "func %p, read data %x\n", + func, *data); + } - do { - cpu_relax(); - cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT); - } while (!cfgstat); + return 0; +} - if (!write && (cfgstat & SYS_CFGSTAT_COMPLETE)) - *data = readl(vexpress_sysreg_base + SYS_CFGDATA); - status = VEXPRESS_CONFIG_STATUS_DONE; +static int vexpress_sysreg_config_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_sysreg_config_func *func = context; - if (cfgstat & SYS_CFGSTAT_ERR) - status = -EINVAL; - } + return vexpress_sysreg_config_exec(func, index, false, val); +} - return status; +static int vexpress_sysreg_config_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_sysreg_config_func *func = context; + + return vexpress_sysreg_config_exec(func, index, true, &val); } -struct vexpress_config_bridge_info vexpress_sysreg_config_bridge_info = { - .name = "vexpress-sysreg", - .func_get = vexpress_sysreg_config_func_get, - .func_put = vexpress_sysreg_config_func_put, - .func_exec = vexpress_sysreg_config_func_exec, +struct regmap_config vexpress_sysreg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_sysreg_config_read, + .reg_write = vexpress_sysreg_config_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, }; -static void vexpress_sysreg_config_complete(unsigned long data) +static struct regmap *vexpress_sysreg_config_regmap_init(struct device *dev, + void *context) { - int status = VEXPRESS_CONFIG_STATUS_DONE; - u32 cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT); - - if (cfgstat & SYS_CFGSTAT_ERR) - status = -EINVAL; - if (!vexpress_sysreg_config_tries--) - status = -ETIMEDOUT; - - if (status < 0) { - dev_err(vexpress_sysreg_dev, "error %d\n", status); - } else if (!(cfgstat & SYS_CFGSTAT_COMPLETE)) { - mod_timer(&vexpress_sysreg_config_timer, - jiffies + usecs_to_jiffies(50)); - return; + struct platform_device *pdev = to_platform_device(dev); + struct vexpress_sysreg_config_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int err; + int i; + + if (dev->of_node) { + err = vexpress_config_get_topo(dev->of_node, &site, &position, + &dcc); + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); + + num = prop->length / sizeof(u32) / 2; + val = prop->value; + } else { + if (pdev->num_resources != 1 || + pdev->resource[0].flags != IORESOURCE_BUS) + return ERR_PTR(-EFAULT); + + site = pdev->resource[0].start; + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_sysreg_get_master(); + position = 0; + dcc = 0; + num = 1; } - if (vexpress_sysreg_config_data) { - *vexpress_sysreg_config_data = readl(vexpress_sysreg_base + - SYS_CFGDATA); - dev_dbg(vexpress_sysreg_dev, "read data %x\n", - 
*vexpress_sysreg_config_data); - vexpress_sysreg_config_data = NULL; + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; + energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; } - vexpress_config_complete(vexpress_sysreg_config_bridge, status); -} + func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, + GFP_KERNEL); + if (!func) + return NULL; + func->num_templates = num; -void vexpress_sysreg_setup(struct device_node *node) -{ - if (WARN_ON(!vexpress_sysreg_base)) - return; + for (i = 0; i < num; i++) { + u32 function, device; - if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) - vexpress_master_site = VEXPRESS_SITE_DB2; + if (dev->of_node) { + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + } else { + function = pdev->resource[0].end; + device = pdev->id; + } + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, device); + + func->template[i] = SYS_CFGCTRL_DCC(dcc); + func->template[i] |= SYS_CFGCTRL_SITE(site); + func->template[i] |= SYS_CFGCTRL_POSITION(position); + func->template[i] |= SYS_CFGCTRL_FUNC(function); + func->template[i] |= SYS_CFGCTRL_DEVICE(device); + } + + vexpress_sysreg_regmap_config.max_register = num - 1; + + func->regmap = regmap_init(dev, NULL, func, + &vexpress_sysreg_regmap_config); + + if (IS_ERR(func->regmap)) + kfree(func); else - vexpress_master_site = VEXPRESS_SITE_DB1; + list_add(&func->list, &vexpress_sysreg_config_funcs); - vexpress_sysreg_config_bridge = vexpress_config_bridge_register( - node, &vexpress_sysreg_config_bridge_info); - WARN_ON(!vexpress_sysreg_config_bridge); + return func->regmap; +} + +static void vexpress_sysreg_config_regmap_exit(struct regmap *regmap, + void *context) +{ + struct vexpress_sysreg_config_func *func, *tmp; + + regmap_exit(regmap); + + list_for_each_entry_safe(func, tmp, &vexpress_sysreg_config_funcs, + list) { + if (func->regmap == regmap) { + list_del(&vexpress_sysreg_config_funcs); + kfree(func); + break; + } + } +} + +static struct vexpress_config_bridge_ops vexpress_sysreg_config_bridge_ops = { + .regmap_init = vexpress_sysreg_config_regmap_init, + .regmap_exit = vexpress_sysreg_config_regmap_exit, +}; + +int vexpress_sysreg_config_device_register(struct platform_device *pdev) +{ + pdev->dev.parent = vexpress_sysreg_config_bridge; + + return platform_device_register(pdev); } + void __init vexpress_sysreg_early_init(void __iomem *base) { vexpress_sysreg_base = base; - vexpress_sysreg_setup(NULL); + vexpress_config_set_master(vexpress_sysreg_get_master()); } void __init vexpress_sysreg_of_early_init(void) @@ -344,10 +358,14 @@ void __init vexpress_sysreg_of_early_init(void) return; node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg"); - if (node) { - vexpress_sysreg_base = of_iomap(node, 0); - vexpress_sysreg_setup(node); - } + if (WARN_ON(!node)) + return; + + vexpress_sysreg_base = of_iomap(node, 0); + if (WARN_ON(!vexpress_sysreg_base)) + return; + + vexpress_config_set_master(vexpress_sysreg_get_master()); } @@ -470,28 +488,22 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) return -EBUSY; } - if (!vexpress_sysreg_base) { + if (!vexpress_sysreg_base) vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - 
vexpress_sysreg_setup(pdev->dev.of_node); - } if (!vexpress_sysreg_base) { dev_err(&pdev->dev, "Failed to obtain base address!\n"); return -EFAULT; } - setup_timer(&vexpress_sysreg_config_timer, - vexpress_sysreg_config_complete, 0); - + vexpress_config_set_master(vexpress_sysreg_get_master()); vexpress_sysreg_dev = &pdev->dev; #ifdef CONFIG_GPIOLIB vexpress_sysreg_gpio_chip.dev = &pdev->dev; err = gpiochip_add(&vexpress_sysreg_gpio_chip); if (err) { - vexpress_config_bridge_unregister( - vexpress_sysreg_config_bridge); dev_err(&pdev->dev, "Failed to register GPIO chip! (%d)\n", err); return err; @@ -502,6 +514,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) sizeof(vexpress_sysreg_leds_pdata)); #endif + vexpress_sysreg_config_bridge = vexpress_config_bridge_register( + &pdev->dev, &vexpress_sysreg_config_bridge_ops, NULL); + WARN_ON(!vexpress_sysreg_config_bridge); + device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id); return 0; @@ -522,7 +538,12 @@ static struct platform_driver vexpress_sysreg_driver = { static int __init vexpress_sysreg_init(void) { - vexpress_sysreg_of_early_init(); + struct device_node *node; + + /* Need the sysreg early, before any other device... */ + for_each_matching_node(node, vexpress_sysreg_match) + of_platform_device_create(node, NULL, NULL); + return platform_driver_register(&vexpress_sysreg_driver); } core_initcall(vexpress_sysreg_init); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index b95cf71ed695..4dc102e2b230 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -23,10 +23,10 @@ static void vexpress_reset_do(struct device *dev, const char *what) { int err = -ENOENT; - struct vexpress_config_func *func = dev_get_drvdata(dev); + struct regmap *reg = dev_get_drvdata(dev); - if (func) { - err = vexpress_config_write(func, 0, 0); + if (reg) { + err = regmap_write(reg, 0, 0); if (!err) mdelay(1000); } @@ -91,17 +91,17 @@ static int vexpress_reset_probe(struct platform_device *pdev) enum vexpress_reset_func func; const struct of_device_id *match = of_match_device(vexpress_reset_of_match, &pdev->dev); - struct vexpress_config_func *config_func; + struct regmap *regmap; if (match) func = (enum vexpress_reset_func)match->data; else func = pdev->id_entry->driver_data; - config_func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!config_func) - return -EINVAL; - dev_set_drvdata(&pdev->dev, config_func); + regmap = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + dev_set_drvdata(&pdev->dev, regmap); switch (func) { case FUNC_SHUTDOWN: diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c index f3ae28a7e663..2863428813e4 100644 --- a/drivers/regulator/vexpress.c +++ b/drivers/regulator/vexpress.c @@ -26,14 +26,14 @@ struct vexpress_regulator { struct regulator_desc desc; struct regulator_dev *regdev; - struct vexpress_config_func *func; + struct regmap *regmap; }; static int vexpress_regulator_get_voltage(struct regulator_dev *regdev) { struct vexpress_regulator *reg = rdev_get_drvdata(regdev); u32 uV; - int err = vexpress_config_read(reg->func, 0, &uV); + int err = regmap_read(reg->regmap, 0, &uV); return err ? 
err : uV; } @@ -43,7 +43,7 @@ static int vexpress_regulator_set_voltage(struct regulator_dev *regdev, { struct vexpress_regulator *reg = rdev_get_drvdata(regdev); - return vexpress_config_write(reg->func, 0, min_uV); + return regmap_write(reg->regmap, 0, min_uV); } static struct regulator_ops vexpress_regulator_ops_ro = { @@ -57,22 +57,17 @@ static struct regulator_ops vexpress_regulator_ops = { static int vexpress_regulator_probe(struct platform_device *pdev) { - int err; struct vexpress_regulator *reg; struct regulator_init_data *init_data; struct regulator_config config = { }; reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL); - if (!reg) { - err = -ENOMEM; - goto error_kzalloc; - } + if (!reg) + return -ENOMEM; - reg->func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!reg->func) { - err = -ENXIO; - goto error_get_func; - } + reg->regmap = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(reg->regmap)) + return PTR_ERR(reg->regmap); reg->desc.name = dev_name(&pdev->dev); reg->desc.type = REGULATOR_VOLTAGE; @@ -80,10 +75,8 @@ static int vexpress_regulator_probe(struct platform_device *pdev) reg->desc.continuous_voltage_range = true; init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); - if (!init_data) { - err = -EINVAL; - goto error_get_regulator_init_data; - } + if (!init_data) + return -EINVAL; init_data->constraints.apply_uV = 0; if (init_data->constraints.min_uV && init_data->constraints.max_uV) @@ -97,29 +90,11 @@ static int vexpress_regulator_probe(struct platform_device *pdev) config.of_node = pdev->dev.of_node; reg->regdev = devm_regulator_register(&pdev->dev, ®->desc, &config); - if (IS_ERR(reg->regdev)) { - err = PTR_ERR(reg->regdev); - goto error_regulator_register; - } + if (IS_ERR(reg->regdev)) + return PTR_ERR(reg->regdev); platform_set_drvdata(pdev, reg); - return 0; - -error_regulator_register: -error_get_regulator_init_data: - vexpress_config_func_put(reg->func); -error_get_func: -error_kzalloc: - return err; -} - -static int vexpress_regulator_remove(struct platform_device *pdev) -{ - struct vexpress_regulator *reg = platform_get_drvdata(pdev); - - vexpress_config_func_put(reg->func); - return 0; } @@ -130,7 +105,6 @@ static struct of_device_id vexpress_regulator_of_match[] = { static struct platform_driver vexpress_regulator_driver = { .probe = vexpress_regulator_probe, - .remove = vexpress_regulator_remove, .driver = { .name = DRVNAME, .owner = THIS_MODULE, diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 617c01b8f74a..6b206ba6aa0e 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -15,16 +15,15 @@ #define _LINUX_VEXPRESS_H #include +#include #include +#include #define VEXPRESS_SITE_MB 0 #define VEXPRESS_SITE_DB1 1 #define VEXPRESS_SITE_DB2 2 #define VEXPRESS_SITE_MASTER 0xf -#define VEXPRESS_CONFIG_STATUS_DONE 0 -#define VEXPRESS_CONFIG_STATUS_WAIT 1 - #define VEXPRESS_GPIO_MMC_CARDIN 0 #define VEXPRESS_GPIO_MMC_WPROT 1 #define VEXPRESS_GPIO_FLASH_WPn 2 @@ -44,63 +43,30 @@ .flags = IORESOURCE_BUS, \ } -/* Config bridge API */ +/* Config infrastructure */ -/** - * struct vexpress_config_bridge_info - description of the platform - * configuration infrastructure bridge. - * - * @name: Bridge name - * - * @func_get: Obtains pointer to a configuration function for a given - * device or a Device Tree node, to be used with @func_put - * and @func_exec. The node pointer should take precedence - * over device pointer when both are passed. 
- * - * @func_put: Tells the bridge that the function will not be used any - * more, so all allocated resources can be released. - * - * @func_exec: Executes a configuration function read or write operation. - * The offset selects a 32 bit word of the value accessed. - * Must return VEXPRESS_CONFIG_STATUS_DONE when operation - * is finished immediately, VEXPRESS_CONFIG_STATUS_WAIT when - * will be completed in some time or negative value in case - * of error. - */ -struct vexpress_config_bridge_info { - const char *name; - void *(*func_get)(struct device *dev, struct device_node *node); - void (*func_put)(void *func); - int (*func_exec)(void *func, int offset, bool write, u32 *data); -}; +void vexpress_config_set_master(u32 site); +u32 vexpress_config_get_master(void); -struct vexpress_config_bridge; +void vexpress_config_lock(void *arg); +void vexpress_config_unlock(void *arg); -struct vexpress_config_bridge *vexpress_config_bridge_register( - struct device_node *node, - struct vexpress_config_bridge_info *info); -void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge); +int vexpress_config_get_topo(struct device_node *node, u32 *site, + u32 *position, u32 *dcc); -void vexpress_config_complete(struct vexpress_config_bridge *bridge, - int status); +/* Config bridge API */ -/* Config function API */ +struct vexpress_config_bridge_ops { + struct regmap * (*regmap_init)(struct device *dev, void *context); + void (*regmap_exit)(struct regmap *regmap, void *context); +}; -struct vexpress_config_func; +struct device *vexpress_config_bridge_register(struct device *parent, + struct vexpress_config_bridge_ops *ops, void *context); -struct vexpress_config_func *__vexpress_config_func_get(struct device *dev, - struct device_node *node); -#define vexpress_config_func_get_by_dev(dev) \ - __vexpress_config_func_get(dev, NULL) -#define vexpress_config_func_get_by_node(node) \ - __vexpress_config_func_get(NULL, node) -void vexpress_config_func_put(struct vexpress_config_func *func); +/* Config regmap API */ -/* Both may sleep! */ -int vexpress_config_read(struct vexpress_config_func *func, int offset, - u32 *data); -int vexpress_config_write(struct vexpress_config_func *func, int offset, - u32 data); +struct regmap *devm_regmap_init_vexpress_config(struct device *dev); /* Platform control */ @@ -109,19 +75,12 @@ u32 vexpress_get_hbi(int site); void *vexpress_get_24mhz_clock_base(void); void vexpress_flags_set(u32 data); -#define vexpress_get_site_by_node(node) __vexpress_get_site(NULL, node) -#define vexpress_get_site_by_dev(dev) __vexpress_get_site(dev, NULL) -unsigned __vexpress_get_site(struct device *dev, struct device_node *node); - void vexpress_sysreg_early_init(void __iomem *base); void vexpress_sysreg_of_early_init(void); +int vexpress_sysreg_config_device_register(struct platform_device *pdev); /* Clocks */ -struct clk *vexpress_osc_setup(struct device *dev); -void vexpress_osc_of_setup(struct device_node *node); - void vexpress_clk_init(void __iomem *sp810_base); -void vexpress_clk_of_init(void); #endif -- cgit v1.2.3 From 29f9b6cf7bff6a118130163c848811e14f8022da Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Wed, 12 Feb 2014 10:47:10 +0000 Subject: mfd: syscon: Add platform data with a regmap config name Define syscon platform data structure that can be used to define a regmap config name. This is particularly useful in the regmap debugfs when there is more than one syscon device registered, to distinguish the register blocks. 
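A hedged usage sketch (the board-code pattern, addresses and label below are assumptions for illustration, and it assumes a non-DT "syscon" platform device id is available; none of this is part of the patch):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/syscon.h>

static struct resource foo_syscon_res = DEFINE_RES_MEM(0x10000000, 0x1000);

static struct syscon_platform_data foo_syscon_pdata = {
	.label = "foo-sysctl",	/* used as the regmap config name */
};

static void __init foo_register_syscon(void)
{
	platform_device_register_resndata(NULL, "syscon", 0,
					  &foo_syscon_res, 1,
					  &foo_syscon_pdata,
					  sizeof(foo_syscon_pdata));
}

With a label set, the block shows up under /sys/kernel/debug/regmap/ with a name that tells the register blocks apart, rather than as one of several anonymous syscon instances.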
Signed-off-by: Pawel Moll Acked-by: Lee Jones --- drivers/mfd/syscon.c | 4 ++++ include/linux/platform_data/syscon.h | 8 ++++++++ 2 files changed, 12 insertions(+) create mode 100644 include/linux/platform_data/syscon.h (limited to 'include/linux') diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index dbea55de4397..e2a04bb8bc1e 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -119,6 +120,7 @@ static struct regmap_config syscon_regmap_config = { static int syscon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct syscon_platform_data *pdata = dev_get_platdata(dev); struct syscon *syscon; struct resource *res; void __iomem *base; @@ -136,6 +138,8 @@ static int syscon_probe(struct platform_device *pdev) return -ENOMEM; syscon_regmap_config.max_register = res->end - res->start - 3; + if (pdata) + syscon_regmap_config.name = pdata->label; syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_regmap_config); if (IS_ERR(syscon->regmap)) { diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h new file mode 100644 index 000000000000..2354c6fa3726 --- /dev/null +++ b/include/linux/platform_data/syscon.h @@ -0,0 +1,8 @@ +#ifndef PLATFORM_DATA_SYSCON_H +#define PLATFORM_DATA_SYSCON_H + +struct syscon_platform_data { + const char *label; +}; + +#endif -- cgit v1.2.3 From 974cc7b93441a0e78f030495436d1be7eb7c208d Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Wed, 23 Apr 2014 10:49:31 +0100 Subject: mfd: vexpress: Define the device as MFD cells This patch - finally, after over 6 months! :-( - addresses Samuel's request to split the vexpress-sysreg driver into smaller portions and define the device in a form of MFD cells: * LEDs code has been completely removed and replaced with "gpio-leds" nodes in the tree (referencing dedicated GPIO subnodes in sysreg - bindings documentation updated); this also better fits the reality as some variants of the motherboard don't have all the LEDs populated * syscfg bridge code has been extracted into a separate driver (placed in drivers/misc for no better place) * all the ID & MISC registers are defined as sysconf making them available for other drivers should they need to use them (and also to the user via /sys/kernel/debug/regmap which can be helpful in platform debugging) Signed-off-by: Pawel Moll Acked-by: Lee Jones --- .../devicetree/bindings/arm/vexpress-sysreg.txt | 36 +- arch/arm/boot/dts/vexpress-v2m-rs1.dtsi | 76 ++- arch/arm/boot/dts/vexpress-v2m.dtsi | 76 ++- arch/arm/mach-vexpress/ct-ca9x4.c | 2 +- arch/arm/mach-vexpress/v2m.c | 15 +- drivers/mfd/Kconfig | 15 +- drivers/mfd/Makefile | 2 +- drivers/mfd/vexpress-sysreg.c | 533 ++++++--------------- drivers/misc/Kconfig | 9 + drivers/misc/Makefile | 1 + drivers/misc/vexpress-syscfg.c | 324 +++++++++++++ include/linux/vexpress.h | 16 +- 12 files changed, 667 insertions(+), 438 deletions(-) create mode 100644 drivers/misc/vexpress-syscfg.c (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt index 57b423f78995..00318d083c9e 100644 --- a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt +++ b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt @@ -8,6 +8,8 @@ interrupt generation, MMC and NOR Flash control etc. 
Required node properties: - compatible value : = "arm,vexpress,sysreg"; - reg : physical base address and the size of the registers window + +Deprecated properties, replaced by GPIO subnodes (see below): - gpio-controller : specifies that the node is a GPIO controller - #gpio-cells : size of the GPIO specifier, should be 2: - first cell is the pseudo-GPIO line number: @@ -16,12 +18,42 @@ Required node properties: 2 - NOR FLASH WPn - second cell can take standard GPIO flags (currently ignored). +Control registers providing pseudo-GPIO lines must be represented +by subnodes, each of them requiring the following properties: +- compatible value : one of + "arm,vexpress-sysreg,sys_led" + "arm,vexpress-sysreg,sys_mci" + "arm,vexpress-sysreg,sys_flash" +- gpio-controller : makes the node a GPIO controller +- #gpio-cells : size of the GPIO specifier, must be 2: + - first cell is the function number: + - for sys_led : 0..7 = LED 0..7 + - for sys_mci : 0 = MMC CARDIN, 1 = MMC WPROT + - for sys_flash : 0 = NOR FLASH WPn + - second cell can take standard GPIO flags (currently ignored). + Example: v2m_sysreg: sysreg@10000000 { compatible = "arm,vexpress-sysreg"; reg = <0x10000000 0x1000>; - gpio-controller; - #gpio-cells = <2>; + + v2m_led_gpios: sys_led@08 { + compatible = "arm,vexpress-sysreg,sys_led"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_mmc_gpios: sys_mci@48 { + compatible = "arm,vexpress-sysreg,sys_mci"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_flash_gpios: sys_flash@4c { + compatible = "arm,vexpress-sysreg,sys_flash"; + gpio-controller; + #gpio-cells = <2>; + }; }; This block also can also act a bridge to the platform's configuration diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index ac870fb3fa0d..756c986995a3 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -74,8 +74,24 @@ v2m_sysreg: sysreg@010000 { compatible = "arm,vexpress-sysreg"; reg = <0x010000 0x1000>; - gpio-controller; - #gpio-cells = <2>; + + v2m_led_gpios: sys_led@08 { + compatible = "arm,vexpress-sysreg,sys_led"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_mmc_gpios: sys_mci@48 { + compatible = "arm,vexpress-sysreg,sys_mci"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_flash_gpios: sys_flash@4c { + compatible = "arm,vexpress-sysreg,sys_flash"; + gpio-controller; + #gpio-cells = <2>; + }; }; v2m_sysctl: sysctl@020000 { @@ -113,8 +129,8 @@ compatible = "arm,pl180", "arm,primecell"; reg = <0x050000 0x1000>; interrupts = <9 10>; - cd-gpios = <&v2m_sysreg 0 0>; - wp-gpios = <&v2m_sysreg 1 0>; + cd-gpios = <&v2m_mmc_gpios 0 0>; + wp-gpios = <&v2m_mmc_gpios 1 0>; max-frequency = <12000000>; vmmc-supply = <&v2m_fixed_3v3>; clocks = <&v2m_clk24mhz>, <&smbclk>; @@ -265,6 +281,58 @@ clock-output-names = "v2m:refclk32khz"; }; + leds { + compatible = "gpio-leds"; + + user@1 { + label = "v2m:green:user1"; + gpios = <&v2m_led_gpios 0 0>; + linux,default-trigger = "heartbeat"; + }; + + user@2 { + label = "v2m:green:user2"; + gpios = <&v2m_led_gpios 1 0>; + linux,default-trigger = "mmc0"; + }; + + user@3 { + label = "v2m:green:user3"; + gpios = <&v2m_led_gpios 2 0>; + linux,default-trigger = "cpu0"; + }; + + user@4 { + label = "v2m:green:user4"; + gpios = <&v2m_led_gpios 3 0>; + linux,default-trigger = "cpu1"; + }; + + user@5 { + label = "v2m:green:user5"; + gpios = <&v2m_led_gpios 4 0>; + linux,default-trigger = "cpu2"; + }; + + user@6 { + label = "v2m:green:user6"; + gpios = <&v2m_led_gpios 5 0>; + 
linux,default-trigger = "cpu3"; + }; + + user@7 { + label = "v2m:green:user7"; + gpios = <&v2m_led_gpios 6 0>; + linux,default-trigger = "cpu4"; + }; + + user@8 { + label = "v2m:green:user8"; + gpios = <&v2m_led_gpios 7 0>; + linux,default-trigger = "cpu5"; + }; + }; + mcc { compatible = "arm,vexpress,config-bus"; arm,vexpress,config-bridge = <&v2m_sysreg>; diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi index f1420368355b..ba856d604fb7 100644 --- a/arch/arm/boot/dts/vexpress-v2m.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m.dtsi @@ -73,8 +73,24 @@ v2m_sysreg: sysreg@00000 { compatible = "arm,vexpress-sysreg"; reg = <0x00000 0x1000>; - gpio-controller; - #gpio-cells = <2>; + + v2m_led_gpios: sys_led@08 { + compatible = "arm,vexpress-sysreg,sys_led"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_mmc_gpios: sys_mci@48 { + compatible = "arm,vexpress-sysreg,sys_mci"; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_flash_gpios: sys_flash@4c { + compatible = "arm,vexpress-sysreg,sys_flash"; + gpio-controller; + #gpio-cells = <2>; + }; }; v2m_sysctl: sysctl@01000 { @@ -112,8 +128,8 @@ compatible = "arm,pl180", "arm,primecell"; reg = <0x05000 0x1000>; interrupts = <9 10>; - cd-gpios = <&v2m_sysreg 0 0>; - wp-gpios = <&v2m_sysreg 1 0>; + cd-gpios = <&v2m_mmc_gpios 0 0>; + wp-gpios = <&v2m_mmc_gpios 1 0>; max-frequency = <12000000>; vmmc-supply = <&v2m_fixed_3v3>; clocks = <&v2m_clk24mhz>, <&smbclk>; @@ -264,6 +280,58 @@ clock-output-names = "v2m:refclk32khz"; }; + leds { + compatible = "gpio-leds"; + + user@1 { + label = "v2m:green:user1"; + gpios = <&v2m_led_gpios 0 0>; + linux,default-trigger = "heartbeat"; + }; + + user@2 { + label = "v2m:green:user2"; + gpios = <&v2m_led_gpios 1 0>; + linux,default-trigger = "mmc0"; + }; + + user@3 { + label = "v2m:green:user3"; + gpios = <&v2m_led_gpios 2 0>; + linux,default-trigger = "cpu0"; + }; + + user@4 { + label = "v2m:green:user4"; + gpios = <&v2m_led_gpios 3 0>; + linux,default-trigger = "cpu1"; + }; + + user@5 { + label = "v2m:green:user5"; + gpios = <&v2m_led_gpios 4 0>; + linux,default-trigger = "cpu2"; + }; + + user@6 { + label = "v2m:green:user6"; + gpios = <&v2m_led_gpios 5 0>; + linux,default-trigger = "cpu3"; + }; + + user@7 { + label = "v2m:green:user7"; + gpios = <&v2m_led_gpios 6 0>; + linux,default-trigger = "cpu4"; + }; + + user@8 { + label = "v2m:green:user8"; + gpios = <&v2m_led_gpios 7 0>; + linux,default-trigger = "cpu5"; + }; + }; + mcc { compatible = "arm,vexpress,config-bus"; arm,vexpress,config-bridge = <&v2m_sysreg>; diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 35e394aa00e5..494d70bfddad 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -160,7 +160,7 @@ static void __init ct_ca9x4_init(void) amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); platform_device_register(&pmu_device); - vexpress_sysreg_config_device_register(&osc1_device); + vexpress_syscfg_device_register(&osc1_device); } #ifdef CONFIG_SMP diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index ac95220a5019..90f04c9b11d2 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c @@ -201,8 +201,9 @@ static struct platform_device v2m_cf_device = { static struct mmci_platform_data v2m_mmci_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .gpio_wp = VEXPRESS_GPIO_MMC_WPROT, - .gpio_cd = VEXPRESS_GPIO_MMC_CARDIN, + .status = vexpress_get_mci_cardin, + .gpio_cd = -1, + .gpio_wp = -1, }; 
static struct resource v2m_sysreg_resources[] = { @@ -351,10 +352,10 @@ static void __init v2m_init(void) for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++) amba_device_register(v2m_amba_devs[i], &iomem_resource); - vexpress_sysreg_config_device_register(&v2m_muxfpga_device); - vexpress_sysreg_config_device_register(&v2m_shutdown_device); - vexpress_sysreg_config_device_register(&v2m_reboot_device); - vexpress_sysreg_config_device_register(&v2m_dvimode_device); + vexpress_syscfg_device_register(&v2m_muxfpga_device); + vexpress_syscfg_device_register(&v2m_shutdown_device); + vexpress_syscfg_device_register(&v2m_reboot_device); + vexpress_syscfg_device_register(&v2m_dvimode_device); ct_desc->init_tile(); } @@ -409,8 +410,6 @@ void __init v2m_dt_init_early(void) { u32 dt_hbi; - vexpress_sysreg_of_early_init(); - /* Confirm board type against DT property, if available */ if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { u32 hbi = vexpress_get_hbi(VEXPRESS_SITE_MASTER); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 33834120d057..490fd48a9541 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1227,12 +1227,17 @@ config MCP_UCB1200_TS endmenu -config VEXPRESS_CONFIG - bool "ARM Versatile Express platform infrastructure" - depends on ARM || ARM64 +config MFD_VEXPRESS_SYSREG + bool "Versatile Express System Registers" + depends on VEXPRESS_CONFIG + default y + select CLKSRC_MMIO + select GPIO_GENERIC_PLATFORM + select MFD_CORE + select MFD_SYSCON help - Platform configuration infrastructure for the ARM Ltd. - Versatile Express. + System Registers are the platform configuration block + on the ARM Ltd. Versatile Express board. endmenu endif diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 9ba838eb5131..cec3487b539e 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -161,7 +161,7 @@ obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o obj-$(CONFIG_MFD_SYSCON) += syscon.o obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o -obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-sysreg.o +obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index b4138a7168db..952df843b6be 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -11,25 +11,22 @@ * Copyright (C) 2012 ARM Limited */ +#include #include -#include #include -#include +#include #include #include +#include #include -#include -#include #include #include -#include #include #define SYS_ID 0x000 #define SYS_SW 0x004 #define SYS_LED 0x008 #define SYS_100HZ 0x024 -#define SYS_FLAGS 0x030 #define SYS_FLAGSSET 0x030 #define SYS_FLAGSCLR 0x034 #define SYS_NVFLAGS 0x038 @@ -51,36 +48,32 @@ #define SYS_ID_HBI_SHIFT 16 #define SYS_PROCIDx_HBI_SHIFT 0 -#define SYS_LED_LED(n) (1 << (n)) - #define SYS_MCI_CARDIN (1 << 0) #define SYS_MCI_WPROT (1 << 1) -#define SYS_FLASH_WPn (1 << 0) - #define SYS_MISC_MASTERSITE (1 << 14) -#define SYS_CFGCTRL_START (1 << 31) -#define SYS_CFGCTRL_WRITE (1 << 30) -#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) -#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) -#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) -#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) -#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) -#define SYS_CFGSTAT_ERR (1 << 1) -#define SYS_CFGSTAT_COMPLETE (1 << 0) +static void __iomem 
*__vexpress_sysreg_base; +static void __iomem *vexpress_sysreg_base(void) +{ + if (!__vexpress_sysreg_base) { + struct device_node *node = of_find_compatible_node(NULL, NULL, + "arm,vexpress-sysreg"); -static void __iomem *vexpress_sysreg_base; -static struct device *vexpress_sysreg_dev; -static LIST_HEAD(vexpress_sysreg_config_funcs); -static struct device *vexpress_sysreg_config_bridge; + __vexpress_sysreg_base = of_iomap(node, 0); + } + + WARN_ON(!__vexpress_sysreg_base); + + return __vexpress_sysreg_base; +} static int vexpress_sysreg_get_master(void) { - if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) + if (readl(vexpress_sysreg_base() + SYS_MISC) & SYS_MISC_MASTERSITE) return VEXPRESS_SITE_DB2; return VEXPRESS_SITE_DB1; @@ -88,8 +81,13 @@ static int vexpress_sysreg_get_master(void) void vexpress_flags_set(u32 data) { - writel(~0, vexpress_sysreg_base + SYS_FLAGSCLR); - writel(data, vexpress_sysreg_base + SYS_FLAGSSET); + writel(~0, vexpress_sysreg_base() + SYS_FLAGSCLR); + writel(data, vexpress_sysreg_base() + SYS_FLAGSSET); +} + +unsigned int vexpress_get_mci_cardin(struct device *dev) +{ + return readl(vexpress_sysreg_base() + SYS_MCI) & SYS_MCI_CARDIN; } u32 vexpress_get_procid(int site) @@ -97,7 +95,7 @@ u32 vexpress_get_procid(int site) if (site == VEXPRESS_SITE_MASTER) site = vexpress_sysreg_get_master(); - return readl(vexpress_sysreg_base + (site == VEXPRESS_SITE_DB1 ? + return readl(vexpress_sysreg_base() + (site == VEXPRESS_SITE_DB1 ? SYS_PROCID0 : SYS_PROCID1)); } @@ -107,7 +105,7 @@ u32 vexpress_get_hbi(int site) switch (site) { case VEXPRESS_SITE_MB: - id = readl(vexpress_sysreg_base + SYS_ID); + id = readl(vexpress_sysreg_base() + SYS_ID); return (id >> SYS_ID_HBI_SHIFT) & SYS_HBI_MASK; case VEXPRESS_SITE_MASTER: case VEXPRESS_SITE_DB1: @@ -121,406 +119,143 @@ u32 vexpress_get_hbi(int site) void __iomem *vexpress_get_24mhz_clock_base(void) { - return vexpress_sysreg_base + SYS_24MHZ; -} - - -struct vexpress_sysreg_config_func { - struct list_head list; - struct regmap *regmap; - int num_templates; - u32 template[0]; /* Keep this last */ -}; - -static int vexpress_sysreg_config_exec(struct vexpress_sysreg_config_func *func, - int index, bool write, u32 *data) -{ - u32 command, status; - int tries; - long timeout; - - if (WARN_ON(!vexpress_sysreg_base)) - return -ENOENT; - - if (WARN_ON(index > func->num_templates)) - return -EINVAL; - - command = readl(vexpress_sysreg_base + SYS_CFGCTRL); - if (WARN_ON(command & SYS_CFGCTRL_START)) - return -EBUSY; - - command = func->template[index]; - command |= SYS_CFGCTRL_START; - command |= write ? SYS_CFGCTRL_WRITE : 0; - - /* Use a canary for reads */ - if (!write) - *data = 0xdeadbeef; - - dev_dbg(vexpress_sysreg_dev, "command %x, data %x\n", - command, *data); - writel(*data, vexpress_sysreg_base + SYS_CFGDATA); - writel(0, vexpress_sysreg_base + SYS_CFGSTAT); - writel(command, vexpress_sysreg_base + SYS_CFGCTRL); - mb(); - - /* The operation can take ages... 
Go to sleep, 100us initially */ - tries = 100; - timeout = 100; - do { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(timeout)); - if (signal_pending(current)) - return -EINTR; - - status = readl(vexpress_sysreg_base + SYS_CFGSTAT); - if (status & SYS_CFGSTAT_ERR) - return -EFAULT; - - if (timeout > 20) - timeout -= 20; - } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); - if (WARN_ON_ONCE(!tries)) - return -ETIMEDOUT; - - if (!write) { - *data = readl(vexpress_sysreg_base + SYS_CFGDATA); - dev_dbg(vexpress_sysreg_dev, "func %p, read data %x\n", - func, *data); - } - - return 0; -} - -static int vexpress_sysreg_config_read(void *context, unsigned int index, - unsigned int *val) -{ - struct vexpress_sysreg_config_func *func = context; - - return vexpress_sysreg_config_exec(func, index, false, val); -} - -static int vexpress_sysreg_config_write(void *context, unsigned int index, - unsigned int val) -{ - struct vexpress_sysreg_config_func *func = context; - - return vexpress_sysreg_config_exec(func, index, true, &val); -} - -struct regmap_config vexpress_sysreg_regmap_config = { - .lock = vexpress_config_lock, - .unlock = vexpress_config_unlock, - .reg_bits = 32, - .val_bits = 32, - .reg_read = vexpress_sysreg_config_read, - .reg_write = vexpress_sysreg_config_write, - .reg_format_endian = REGMAP_ENDIAN_LITTLE, - .val_format_endian = REGMAP_ENDIAN_LITTLE, -}; - -static struct regmap *vexpress_sysreg_config_regmap_init(struct device *dev, - void *context) -{ - struct platform_device *pdev = to_platform_device(dev); - struct vexpress_sysreg_config_func *func; - struct property *prop; - const __be32 *val = NULL; - __be32 energy_quirk[4]; - int num; - u32 site, position, dcc; - int err; - int i; - - if (dev->of_node) { - err = vexpress_config_get_topo(dev->of_node, &site, &position, - &dcc); - if (err) - return ERR_PTR(err); - - prop = of_find_property(dev->of_node, - "arm,vexpress-sysreg,func", NULL); - if (!prop) - return ERR_PTR(-EINVAL); - - num = prop->length / sizeof(u32) / 2; - val = prop->value; - } else { - if (pdev->num_resources != 1 || - pdev->resource[0].flags != IORESOURCE_BUS) - return ERR_PTR(-EFAULT); - - site = pdev->resource[0].start; - if (site == VEXPRESS_SITE_MASTER) - site = vexpress_sysreg_get_master(); - position = 0; - dcc = 0; - num = 1; - } - - /* - * "arm,vexpress-energy" function used to be described - * by its first device only, now it requires both - */ - if (num == 1 && of_device_is_compatible(dev->of_node, - "arm,vexpress-energy")) { - num = 2; - energy_quirk[0] = *val; - energy_quirk[2] = *val++; - energy_quirk[1] = *val; - energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); - val = energy_quirk; - } - - func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, - GFP_KERNEL); - if (!func) - return NULL; - - func->num_templates = num; - - for (i = 0; i < num; i++) { - u32 function, device; - - if (dev->of_node) { - function = be32_to_cpup(val++); - device = be32_to_cpup(val++); - } else { - function = pdev->resource[0].end; - device = pdev->id; - } - - dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", - func, site, position, dcc, - function, device); - - func->template[i] = SYS_CFGCTRL_DCC(dcc); - func->template[i] |= SYS_CFGCTRL_SITE(site); - func->template[i] |= SYS_CFGCTRL_POSITION(position); - func->template[i] |= SYS_CFGCTRL_FUNC(function); - func->template[i] |= SYS_CFGCTRL_DEVICE(device); - } - - vexpress_sysreg_regmap_config.max_register = num - 1; - - func->regmap = regmap_init(dev, NULL, func, - 
&vexpress_sysreg_regmap_config); - - if (IS_ERR(func->regmap)) - kfree(func); - else - list_add(&func->list, &vexpress_sysreg_config_funcs); - - return func->regmap; -} - -static void vexpress_sysreg_config_regmap_exit(struct regmap *regmap, - void *context) -{ - struct vexpress_sysreg_config_func *func, *tmp; - - regmap_exit(regmap); - - list_for_each_entry_safe(func, tmp, &vexpress_sysreg_config_funcs, - list) { - if (func->regmap == regmap) { - list_del(&vexpress_sysreg_config_funcs); - kfree(func); - break; - } - } -} - -static struct vexpress_config_bridge_ops vexpress_sysreg_config_bridge_ops = { - .regmap_init = vexpress_sysreg_config_regmap_init, - .regmap_exit = vexpress_sysreg_config_regmap_exit, -}; - -int vexpress_sysreg_config_device_register(struct platform_device *pdev) -{ - pdev->dev.parent = vexpress_sysreg_config_bridge; - - return platform_device_register(pdev); + return vexpress_sysreg_base() + SYS_24MHZ; } void __init vexpress_sysreg_early_init(void __iomem *base) { - vexpress_sysreg_base = base; - vexpress_config_set_master(vexpress_sysreg_get_master()); -} - -void __init vexpress_sysreg_of_early_init(void) -{ - struct device_node *node; - - if (vexpress_sysreg_base) - return; - - node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg"); - if (WARN_ON(!node)) - return; - - vexpress_sysreg_base = of_iomap(node, 0); - if (WARN_ON(!vexpress_sysreg_base)) - return; + __vexpress_sysreg_base = base; vexpress_config_set_master(vexpress_sysreg_get_master()); } -#ifdef CONFIG_GPIOLIB - -#define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \ - [VEXPRESS_GPIO_##_name] = { \ - .reg = _reg, \ - .value = _reg##_##_value, \ - } +/* The sysreg block is just a random collection of various functions... */ -static struct vexpress_sysreg_gpio { - unsigned long reg; - u32 value; -} vexpress_sysreg_gpios[] = { - VEXPRESS_SYSREG_GPIO(MMC_CARDIN, SYS_MCI, CARDIN), - VEXPRESS_SYSREG_GPIO(MMC_WPROT, SYS_MCI, WPROT), - VEXPRESS_SYSREG_GPIO(FLASH_WPn, SYS_FLASH, WPn), - VEXPRESS_SYSREG_GPIO(LED0, SYS_LED, LED(0)), - VEXPRESS_SYSREG_GPIO(LED1, SYS_LED, LED(1)), - VEXPRESS_SYSREG_GPIO(LED2, SYS_LED, LED(2)), - VEXPRESS_SYSREG_GPIO(LED3, SYS_LED, LED(3)), - VEXPRESS_SYSREG_GPIO(LED4, SYS_LED, LED(4)), - VEXPRESS_SYSREG_GPIO(LED5, SYS_LED, LED(5)), - VEXPRESS_SYSREG_GPIO(LED6, SYS_LED, LED(6)), - VEXPRESS_SYSREG_GPIO(LED7, SYS_LED, LED(7)), +static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = { + .label = "sys_id", }; -static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip, - unsigned offset) -{ - return 0; -} - -static int vexpress_sysreg_gpio_get(struct gpio_chip *chip, - unsigned offset) -{ - struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset]; - u32 reg_value = readl(vexpress_sysreg_base + gpio->reg); - - return !!(reg_value & gpio->value); -} - -static void vexpress_sysreg_gpio_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset]; - u32 reg_value = readl(vexpress_sysreg_base + gpio->reg); - - if (value) - reg_value |= gpio->value; - else - reg_value &= ~gpio->value; - - writel(reg_value, vexpress_sysreg_base + gpio->reg); -} - -static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - vexpress_sysreg_gpio_set(chip, offset, value); - - return 0; -} - -static struct gpio_chip vexpress_sysreg_gpio_chip = { - .label = "vexpress-sysreg", - .direction_input = vexpress_sysreg_gpio_direction_input, - .direction_output = 
vexpress_sysreg_gpio_direction_output, - .get = vexpress_sysreg_gpio_get, - .set = vexpress_sysreg_gpio_set, - .ngpio = ARRAY_SIZE(vexpress_sysreg_gpios), - .base = 0, +static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = { + .label = "sys_led", + .base = -1, + .ngpio = 8, }; - -#define VEXPRESS_SYSREG_GREEN_LED(_name, _default_trigger, _gpio) \ - { \ - .name = "v2m:green:"_name, \ - .default_trigger = _default_trigger, \ - .gpio = VEXPRESS_GPIO_##_gpio, \ - } - -struct gpio_led vexpress_sysreg_leds[] = { - VEXPRESS_SYSREG_GREEN_LED("user1", "heartbeat", LED0), - VEXPRESS_SYSREG_GREEN_LED("user2", "mmc0", LED1), - VEXPRESS_SYSREG_GREEN_LED("user3", "cpu0", LED2), - VEXPRESS_SYSREG_GREEN_LED("user4", "cpu1", LED3), - VEXPRESS_SYSREG_GREEN_LED("user5", "cpu2", LED4), - VEXPRESS_SYSREG_GREEN_LED("user6", "cpu3", LED5), - VEXPRESS_SYSREG_GREEN_LED("user7", "cpu4", LED6), - VEXPRESS_SYSREG_GREEN_LED("user8", "cpu5", LED7), +static struct bgpio_pdata vexpress_sysreg_sys_mci_pdata = { + .label = "sys_mci", + .base = -1, + .ngpio = 2, }; -struct gpio_led_platform_data vexpress_sysreg_leds_pdata = { - .num_leds = ARRAY_SIZE(vexpress_sysreg_leds), - .leds = vexpress_sysreg_leds, +static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = { + .label = "sys_flash", + .base = -1, + .ngpio = 1, }; -#endif - +static struct syscon_platform_data vexpress_sysreg_sys_misc_pdata = { + .label = "sys_misc", +}; -static ssize_t vexpress_sysreg_sys_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "0x%08x\n", readl(vexpress_sysreg_base + SYS_ID)); -} +static struct syscon_platform_data vexpress_sysreg_sys_procid_pdata = { + .label = "sys_procid", +}; -DEVICE_ATTR(sys_id, S_IRUGO, vexpress_sysreg_sys_id_show, NULL); +static struct mfd_cell vexpress_sysreg_cells[] = { + { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_ID, 0x4), + }, + .platform_data = &vexpress_sysreg_sys_id_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_id_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_led", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_led_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_mci", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_mci_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_flash", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_flash_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata), + }, { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_MISC, 0x4), + }, + .platform_data = &vexpress_sysreg_sys_misc_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_misc_pdata), + }, { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_PROCID0, 0x8), + }, + .platform_data = &vexpress_sysreg_sys_procid_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_procid_pdata), + }, { + .name = "vexpress-syscfg", + .num_resources = 1, + .resources = (struct resource []) { + 
DEFINE_RES_MEM(SYS_CFGDATA, 0xc), + }, + } +}; static int vexpress_sysreg_probe(struct platform_device *pdev) { - int err; - struct resource *res = platform_get_resource(pdev, - IORESOURCE_MEM, 0); - - if (!devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "Failed to request memory region!\n"); - return -EBUSY; - } + struct resource *mem; + void __iomem *base; + struct bgpio_chip *mmc_gpio_chip; - if (!vexpress_sysreg_base) - vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; - if (!vexpress_sysreg_base) { - dev_err(&pdev->dev, "Failed to obtain base address!\n"); - return -EFAULT; - } + base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (!base) + return -ENOMEM; vexpress_config_set_master(vexpress_sysreg_get_master()); - vexpress_sysreg_dev = &pdev->dev; - -#ifdef CONFIG_GPIOLIB - vexpress_sysreg_gpio_chip.dev = &pdev->dev; - err = gpiochip_add(&vexpress_sysreg_gpio_chip); - if (err) { - dev_err(&pdev->dev, "Failed to register GPIO chip! (%d)\n", - err); - return err; - } - platform_device_register_data(vexpress_sysreg_dev, "leds-gpio", - PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata, - sizeof(vexpress_sysreg_leds_pdata)); -#endif - - vexpress_sysreg_config_bridge = vexpress_config_bridge_register( - &pdev->dev, &vexpress_sysreg_config_bridge_ops, NULL); - WARN_ON(!vexpress_sysreg_config_bridge); - - device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id); - - return 0; + /* + * Duplicated SYS_MCI pseudo-GPIO controller for compatibility with + * older trees using sysreg node for MMC control lines. + */ + mmc_gpio_chip = devm_kzalloc(&pdev->dev, sizeof(*mmc_gpio_chip), + GFP_KERNEL); + if (!mmc_gpio_chip) + return -ENOMEM; + bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI, + NULL, NULL, NULL, NULL, 0); + mmc_gpio_chip->gc.ngpio = 2; + gpiochip_add(&mmc_gpio_chip->gc); + + return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + vexpress_sysreg_cells, + ARRAY_SIZE(vexpress_sysreg_cells), mem, 0, NULL); } static const struct of_device_id vexpress_sysreg_match[] = { diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8baff0effc7d..d9663ef90ce8 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -515,6 +515,15 @@ config SRAM the genalloc API. It is supposed to be used for small on-chip SRAM areas found on many SoCs. +config VEXPRESS_SYSCFG + bool "Versatile Express System Configuration driver" + depends on VEXPRESS_CONFIG + default y + help + ARM Ltd. Versatile Express uses specialised platform configuration + bus. System Configuration interface is one of the possible means + of generating transactions on this bus. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 7eb4b69580c0..d59ce1261b38 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -55,3 +55,4 @@ obj-$(CONFIG_SRAM) += sram.o obj-y += mic/ obj-$(CONFIG_GENWQE) += genwqe/ obj-$(CONFIG_ECHO) += echo/ +obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c new file mode 100644 index 000000000000..73068e50e56d --- /dev/null +++ b/drivers/misc/vexpress-syscfg.c @@ -0,0 +1,324 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SYS_CFGDATA 0x0 + +#define SYS_CFGCTRL 0x4 +#define SYS_CFGCTRL_START (1 << 31) +#define SYS_CFGCTRL_WRITE (1 << 30) +#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) +#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) +#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) +#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) +#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) + +#define SYS_CFGSTAT 0x8 +#define SYS_CFGSTAT_ERR (1 << 1) +#define SYS_CFGSTAT_COMPLETE (1 << 0) + + +struct vexpress_syscfg { + struct device *dev; + void __iomem *base; + struct list_head funcs; +}; + +struct vexpress_syscfg_func { + struct list_head list; + struct vexpress_syscfg *syscfg; + struct regmap *regmap; + int num_templates; + u32 template[0]; /* Keep it last! */ +}; + + +static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, + int index, bool write, u32 *data) +{ + struct vexpress_syscfg *syscfg = func->syscfg; + u32 command, status; + int tries; + long timeout; + + if (WARN_ON(index > func->num_templates)) + return -EINVAL; + + command = readl(syscfg->base + SYS_CFGCTRL); + if (WARN_ON(command & SYS_CFGCTRL_START)) + return -EBUSY; + + command = func->template[index]; + command |= SYS_CFGCTRL_START; + command |= write ? SYS_CFGCTRL_WRITE : 0; + + /* Use a canary for reads */ + if (!write) + *data = 0xdeadbeef; + + dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", + func, command, *data); + writel(*data, syscfg->base + SYS_CFGDATA); + writel(0, syscfg->base + SYS_CFGSTAT); + writel(command, syscfg->base + SYS_CFGCTRL); + mb(); + + /* The operation can take ages... 
Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + if (!irqs_disabled()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + } else { + udelay(timeout); + } + + status = readl(syscfg->base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout > 20) + timeout -= 20; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(syscfg->base + SYS_CFGDATA); + dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); + } + + return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, true, &val); +} + +struct regmap_config vexpress_syscfg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_syscfg_read, + .reg_write = vexpress_syscfg_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + + +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, + void *context) +{ + struct platform_device *pdev = to_platform_device(dev); + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int i; + + if (dev->of_node) { + int err = vexpress_config_get_topo(dev->of_node, &site, + &position, &dcc); + + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); + + num = prop->length / sizeof(u32) / 2; + val = prop->value; + } else { + if (pdev->num_resources != 1 || + pdev->resource[0].flags != IORESOURCE_BUS) + return ERR_PTR(-EFAULT); + + site = pdev->resource[0].start; + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_config_get_master(); + position = 0; + dcc = 0; + num = 1; + } + + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; + energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; + } + + func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, + GFP_KERNEL); + if (!func) + return NULL; + + func->syscfg = syscfg; + func->num_templates = num; + + for (i = 0; i < num; i++) { + u32 function, device; + + if (dev->of_node) { + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + } else { + function = pdev->resource[0].end; + device = pdev->id; + } + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, device); + + func->template[i] = SYS_CFGCTRL_DCC(dcc); + func->template[i] |= SYS_CFGCTRL_SITE(site); + func->template[i] |= SYS_CFGCTRL_POSITION(position); + func->template[i] |= SYS_CFGCTRL_FUNC(function); + func->template[i] |= SYS_CFGCTRL_DEVICE(device); + } + + vexpress_syscfg_regmap_config.max_register = num - 1; + + func->regmap = regmap_init(dev, NULL, 
func, + &vexpress_syscfg_regmap_config); + + if (IS_ERR(func->regmap)) + kfree(func); + else + list_add(&func->list, &syscfg->funcs); + + return func->regmap; +} + +static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) +{ + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func, *tmp; + + regmap_exit(regmap); + + list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) { + if (func->regmap == regmap) { + list_del(&syscfg->funcs); + kfree(func); + break; + } + } +} + +static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { + .regmap_init = vexpress_syscfg_regmap_init, + .regmap_exit = vexpress_syscfg_regmap_exit, +}; + + +/* Non-DT hack, to be gone... */ +static struct device *vexpress_syscfg_bridge; + +int vexpress_syscfg_device_register(struct platform_device *pdev) +{ + pdev->dev.parent = vexpress_syscfg_bridge; + + return platform_device_register(pdev); +} + + +int vexpress_syscfg_probe(struct platform_device *pdev) +{ + struct vexpress_syscfg *syscfg; + struct resource *res; + struct device *bridge; + + syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL); + if (!syscfg) + return -ENOMEM; + syscfg->dev = &pdev->dev; + INIT_LIST_HEAD(&syscfg->funcs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!syscfg->base) + return -EFAULT; + + /* Must use dev.parent (MFD), as that's where DT phandle points at... */ + bridge = vexpress_config_bridge_register(pdev->dev.parent, + &vexpress_syscfg_bridge_ops, syscfg); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + /* Non-DT case */ + if (!pdev->dev.of_node) + vexpress_syscfg_bridge = bridge; + + return 0; +} + +static const struct platform_device_id vexpress_syscfg_id_table[] = { + { "vexpress-syscfg", }, + {}, +}; + +static struct platform_driver vexpress_syscfg_driver = { + .driver.name = "vexpress-syscfg", + .id_table = vexpress_syscfg_id_table, + .probe = vexpress_syscfg_probe, +}; + +static int __init vexpress_syscfg_init(void) +{ + return platform_driver_register(&vexpress_syscfg_driver); +} +core_initcall(vexpress_syscfg_init); diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 6b206ba6aa0e..46636e3f43fd 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -24,18 +24,6 @@ #define VEXPRESS_SITE_DB2 2 #define VEXPRESS_SITE_MASTER 0xf -#define VEXPRESS_GPIO_MMC_CARDIN 0 -#define VEXPRESS_GPIO_MMC_WPROT 1 -#define VEXPRESS_GPIO_FLASH_WPn 2 -#define VEXPRESS_GPIO_LED0 3 -#define VEXPRESS_GPIO_LED1 4 -#define VEXPRESS_GPIO_LED2 5 -#define VEXPRESS_GPIO_LED3 6 -#define VEXPRESS_GPIO_LED4 7 -#define VEXPRESS_GPIO_LED5 8 -#define VEXPRESS_GPIO_LED6 9 -#define VEXPRESS_GPIO_LED7 10 - #define VEXPRESS_RES_FUNC(_site, _func) \ { \ .start = (_site), \ @@ -70,14 +58,14 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); /* Platform control */ +unsigned int vexpress_get_mci_cardin(struct device *dev); u32 vexpress_get_procid(int site); u32 vexpress_get_hbi(int site); void *vexpress_get_24mhz_clock_base(void); void vexpress_flags_set(u32 data); void vexpress_sysreg_early_init(void __iomem *base); -void vexpress_sysreg_of_early_init(void); -int vexpress_sysreg_config_device_register(struct platform_device *pdev); +int vexpress_syscfg_device_register(struct platform_device *pdev); /* Clocks */ -- cgit v1.2.3 From 
6b2c31c71d6fa8896c5f3f2354d790a5bd3f0a1e Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Thu, 6 Feb 2014 14:33:44 +0000 Subject: ARM: vexpress: move HBI check to sysreg driver The last reason for static memory mapping is the HBI (board identification number) check early in the machine code. Moving the check to the sysreg driver makes it possible to completely remove the early mapping and init functions. Signed-off-by: Pawel Moll Acked-by: Lee Jones --- arch/arm/mach-vexpress/v2m.c | 49 ------------------------------------------- drivers/mfd/vexpress-sysreg.c | 30 ++++++++++---------------- include/linux/vexpress.h | 1 - 3 files changed, 11 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index d8b419bcf3c3..38f4f6f37770 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c @@ -370,53 +370,6 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express") .init_machine = v2m_init, MACHINE_END -static struct map_desc v2m_rs1_io_desc __initdata = { - .virtual = V2M_PERIPH, - .pfn = __phys_to_pfn(0x1c000000), - .length = SZ_2M, - .type = MT_DEVICE, -}; - -static int __init v2m_dt_scan_memory_map(unsigned long node, const char *uname, - int depth, void *data) -{ - const char **map = data; - - if (strcmp(uname, "motherboard") != 0) - return 0; - - *map = of_get_flat_dt_prop(node, "arm,v2m-memory-map", NULL); - - return 1; -} - -void __init v2m_dt_map_io(void) -{ - const char *map = NULL; - - of_scan_flat_dt(v2m_dt_scan_memory_map, &map); - - if (map && strcmp(map, "rs1") == 0) - iotable_init(&v2m_rs1_io_desc, 1); - else - iotable_init(v2m_io_desc, ARRAY_SIZE(v2m_io_desc)); -} - -void __init v2m_dt_init_early(void) -{ - u32 dt_hbi; - - /* Confirm board type against DT property, if available */ - if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { - u32 hbi = vexpress_get_hbi(VEXPRESS_SITE_MASTER); - - if (WARN_ON(dt_hbi != hbi)) - pr_warning("vexpress: DT HBI (%x) is not matching " - "hardware (%x)!\n", dt_hbi, hbi); - } -} - - static void __init v2m_dt_init(void) { l2x0_of_init(0x00400000, 0xfe0fffff); @@ -432,7 +385,5 @@ DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") .dt_compat = v2m_dt_match, .smp = smp_ops(vexpress_smp_dt_ops), .smp_init = smp_init_ops(vexpress_smp_init_ops), - .map_io = v2m_dt_map_io, - .init_early = v2m_dt_init_early, .init_machine = v2m_dt_init, MACHINE_END diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index 952df843b6be..9e21e4fc9599 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -45,7 +45,6 @@ #define SYS_CFGSTAT 0x0a8 #define SYS_HBI_MASK 0xfff -#define SYS_ID_HBI_SHIFT 16 #define SYS_PROCIDx_HBI_SHIFT 0 #define SYS_MCI_CARDIN (1 << 0) @@ -99,24 +98,6 @@ u32 vexpress_get_procid(int site) SYS_PROCID0 : SYS_PROCID1)); } -u32 vexpress_get_hbi(int site) -{ - u32 id; - - switch (site) { - case VEXPRESS_SITE_MB: - id = readl(vexpress_sysreg_base() + SYS_ID); - return (id >> SYS_ID_HBI_SHIFT) & SYS_HBI_MASK; - case VEXPRESS_SITE_MASTER: - case VEXPRESS_SITE_DB1: - case VEXPRESS_SITE_DB2: - id = vexpress_get_procid(site); - return (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; - } - - return ~0; -} - void __iomem *vexpress_get_24mhz_clock_base(void) { return vexpress_sysreg_base() + SYS_24MHZ; @@ -229,6 +210,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) struct resource *mem; void __iomem *base; struct bgpio_chip *mmc_gpio_chip; + u32 dt_hbi; mem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); if (!mem) @@ -240,6 +222,16 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) vexpress_config_set_master(vexpress_sysreg_get_master()); + /* Confirm board type against DT property, if available */ + if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { + u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); + u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; + + if (WARN_ON(dt_hbi != hbi)) + dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n", + dt_hbi, hbi); + } + /* * Duplicated SYS_MCI pseudo-GPIO controller for compatibility with * older trees using sysreg node for MMC control lines. diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 46636e3f43fd..a4c9547aae64 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -60,7 +60,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); unsigned int vexpress_get_mci_cardin(struct device *dev); u32 vexpress_get_procid(int site); -u32 vexpress_get_hbi(int site); void *vexpress_get_24mhz_clock_base(void); void vexpress_flags_set(u32 data); -- cgit v1.2.3 From a0bf37edb4d34c21bdaa19a1624378924b917491 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Sun, 6 Oct 2013 20:23:49 +0200 Subject: HSI: method to unregister clients from an hsi port This exports a method to unregister all clients from an hsi port. Signed-off-by: Sebastian Reichel Reviewed-by: Pavel Machek Tested-By: Ivaylo Dimitrov --- drivers/hsi/hsi.c | 10 ++++++++++ include/linux/hsi/hsi.h | 1 + 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 749f7b5c8179..e96a9874b1a4 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -129,6 +129,16 @@ static void hsi_port_release(struct device *dev) kfree(to_hsi_port(dev)); } +/** + * hsi_unregister_port - Unregister an HSI port + * @port: The HSI port to unregister + */ +void hsi_port_unregister_clients(struct hsi_port *port) +{ + device_for_each_child(&port->device, NULL, hsi_remove_client); +} +EXPORT_SYMBOL_GPL(hsi_port_unregister_clients); + /** * hsi_unregister_controller - Unregister an HSI controller * @hsi: The HSI controller to register diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 39bfd5b89077..5a9f1210ed22 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -282,6 +282,7 @@ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); void hsi_put_controller(struct hsi_controller *hsi); int hsi_register_controller(struct hsi_controller *hsi); void hsi_unregister_controller(struct hsi_controller *hsi); +void hsi_port_unregister_clients(struct hsi_port *port); static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, void *data) -- cgit v1.2.3 From a088cf161cc87b39e83c7c53b9f239773422d212 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Fri, 28 Mar 2014 22:48:23 +0100 Subject: HSI: Add channel resource support to HSI clients Make HSI channel ids platform data, which can be provided by platform data. 
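To illustrate the new fields, platform data could describe the channel resources roughly as below; this is a sketch, not taken from an existing board file, and all names and numbers are placeholders:

	#include <linux/hsi/hsi.h>

	/* Hypothetical channel table for one client. */
	static struct hsi_channel example_channels[] = {
		{ .id = 0, .name = "control" },
		{ .id = 2, .name = "data" },
	};

	static struct hsi_board_info example_client_info = {
		.name = "example-hsi-client",
		.tx_cfg = {
			.mode		 = HSI_MODE_FRAME,
			.channels	 = example_channels,
			.num_channels	 = ARRAY_SIZE(example_channels),
			.num_hw_channels = 3,	/* highest used id + 1 */
			.speed		 = 55000,
		},
		.rx_cfg = {
			.mode		 = HSI_MODE_FRAME,
			.channels	 = example_channels,
			.num_channels	 = ARRAY_SIZE(example_channels),
			.num_hw_channels = 3,
			.flow		 = HSI_FLOW_SYNC,
		},
	};

hsi_new_client() copies the channel tables, and the client driver can later resolve an id by name with hsi_get_channel_id_by_name(cl, "data").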
Signed-off-by: Sebastian Reichel Tested-By: Ivaylo Dimitrov --- drivers/hsi/clients/hsi_char.c | 12 +++++------ drivers/hsi/hsi.c | 46 +++++++++++++++++++++++++++++++++++++++++- include/linux/hsi/hsi.h | 24 ++++++++++++++++++---- 3 files changed, 71 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c index 30733209fde2..57f70c28fa38 100644 --- a/drivers/hsi/clients/hsi_char.c +++ b/drivers/hsi/clients/hsi_char.c @@ -367,7 +367,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; - cl->rx_cfg.channels = rxc->channels; + cl->rx_cfg.num_hw_channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { @@ -383,7 +383,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; - rxc->channels = cl->rx_cfg.channels; + rxc->channels = cl->rx_cfg.num_hw_channels; rxc->flow = cl->rx_cfg.flow; } @@ -402,7 +402,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) return -EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; - cl->tx_cfg.channels = txc->channels; + cl->tx_cfg.num_hw_channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); @@ -417,7 +417,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; - txc->channels = cl->tx_cfg.channels; + txc->channels = cl->tx_cfg.num_hw_channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } @@ -435,7 +435,7 @@ static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= channel->cl->rx_cfg.channels) + if (channel->ch >= channel->cl->rx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; @@ -492,7 +492,7 @@ static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= channel->cl->tx_cfg.channels) + if (channel->ch >= channel->cl->tx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index e96a9874b1a4..de2ad8f20d55 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -62,18 +62,36 @@ static struct bus_type hsi_bus_type = { static void hsi_client_release(struct device *dev) { - kfree(to_hsi_client(dev)); + struct hsi_client *cl = to_hsi_client(dev); + + kfree(cl->tx_cfg.channels); + kfree(cl->rx_cfg.channels); + kfree(cl); } static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) { struct hsi_client *cl; + size_t size; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) return; + cl->tx_cfg = info->tx_cfg; + if (cl->tx_cfg.channels) { + size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); + cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size); + } + cl->rx_cfg = info->rx_cfg; + if (cl->rx_cfg.channels) { + size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); + cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->rx_cfg.channels, 
info->rx_cfg.channels, size); + } + cl->device.bus = &hsi_bus_type; cl->device.parent = &port->device; cl->device.release = hsi_client_release; @@ -502,6 +520,32 @@ int hsi_event(struct hsi_port *port, unsigned long event) } EXPORT_SYMBOL_GPL(hsi_event); +/** + * hsi_get_channel_id_by_name - acquire channel id by channel name + * @cl: HSI client, which uses the channel + * @name: name the channel is known under + * + * Clients can call this function to get the hsi channel ids similar to + * requesting IRQs or GPIOs by name. This function assumes the same + * channel configuration is used for RX and TX. + * + * Returns -errno on error or channel id on success. + */ +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name) +{ + int i; + + if (!cl->rx_cfg.channels) + return -ENOENT; + + for (i = 0; i < cl->rx_cfg.num_channels; i++) + if (!strcmp(cl->rx_cfg.channels[i].name, name)) + return cl->rx_cfg.channels[i].id; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name); + static int __init hsi_init(void) { return bus_register(&hsi_bus_type); diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 5a9f1210ed22..e3cff94bef04 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -67,18 +67,32 @@ enum { HSI_EVENT_STOP_RX, }; +/** + * struct hsi_channel - channel resource used by the hsi clients + * @id: Channel number + * @name: Channel name + */ +struct hsi_channel { + unsigned int id; + const char *name; +}; + /** * struct hsi_config - Configuration for RX/TX HSI modules * @mode: Bit transmission mode (STREAM or FRAME) - * @channels: Number of channels to use [1..16] + * @channels: Channel resources used by the client + * @num_channels: Number of channel resources + * @num_hw_channels: Number of channels the transceiver is configured for [1..16] * @speed: Max bit transmission speed (Kbit/s) * @flow: RX flow type (SYNCHRONIZED or PIPELINE) * @arb_mode: Arbitration mode for TX frame (Round robin, priority) */ struct hsi_config { - unsigned int mode; - unsigned int channels; - unsigned int speed; + unsigned int mode; + struct hsi_channel *channels; + unsigned int num_channels; + unsigned int num_hw_channels; + unsigned int speed; union { unsigned int flow; /* RX only */ unsigned int arb_mode; /* TX only */ @@ -306,6 +320,8 @@ static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, */ int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name); + /** * hsi_id - Get HSI controller ID associated to a client * @cl: Pointer to a HSI client -- cgit v1.2.3 From 8491451024bcfabdcebd772ce9ec2fc5757acd42 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Fri, 28 Mar 2014 22:54:25 +0100 Subject: HSI: export method to (un)register clients Expose method for registering and unregistering HSI clients, so that client drivers can register other client drivers. This is useful for HSI drivers, which want to use the functionality of other HSI drivers. For example the N900 modem driver can load HSI drivers for mcsaab protocol and speech protocol. 
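A rough sketch of how a parent client driver might use the now-exported helpers; the driver and board-info names are hypothetical and error handling is trimmed:

	#include <linux/hsi/hsi.h>

	static struct hsi_board_info example_proto_info = {
		.name = "example-protocol",
		/* .tx_cfg / .rx_cfg filled in as for any other client */
	};

	static struct hsi_client *example_child;

	static int example_parent_probe(struct device *dev)
	{
		struct hsi_client *parent = to_hsi_client(dev);
		struct hsi_port *port = hsi_get_port(parent);

		/* Register a child client from within another client driver. */
		example_child = hsi_new_client(port, &example_proto_info);
		if (!example_child)
			return -ENOMEM;

		return 0;
	}

	static int example_parent_remove(struct device *dev)
	{
		/* Unregister the child created in probe. */
		hsi_remove_client(&example_child->device, NULL);
		return 0;
	}

hsi_port_unregister_clients() remains available when every client on a port should be removed at once.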
Signed-off-by: Sebastian Reichel Reviewed-by: Pavel Machek Tested-By: Ivaylo Dimitrov --- drivers/hsi/hsi.c | 11 ++++++++--- include/linux/hsi/hsi.h | 3 +++ 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index de2ad8f20d55..834a2d6b444e 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -69,14 +69,15 @@ static void hsi_client_release(struct device *dev) kfree(cl); } -static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info) { struct hsi_client *cl; size_t size; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) - return; + return NULL; cl->tx_cfg = info->tx_cfg; if (cl->tx_cfg.channels) { @@ -103,7 +104,10 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); } + + return cl; } +EXPORT_SYMBOL_GPL(hsi_new_client); static void hsi_scan_board_info(struct hsi_controller *hsi) { @@ -119,12 +123,13 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) } } -static int hsi_remove_client(struct device *dev, void *data __maybe_unused) +int hsi_remove_client(struct device *dev, void *data __maybe_unused) { device_unregister(dev); return 0; } +EXPORT_SYMBOL_GPL(hsi_remove_client); static int hsi_remove_port(struct device *dev, void *data __maybe_unused) { diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index e3cff94bef04..e20a3999a696 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -296,6 +296,9 @@ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); void hsi_put_controller(struct hsi_controller *hsi); int hsi_register_controller(struct hsi_controller *hsi); void hsi_unregister_controller(struct hsi_controller *hsi); +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info); +int hsi_remove_client(struct device *dev, void *data); void hsi_port_unregister_clients(struct hsi_port *port); static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, -- cgit v1.2.3 From a2aa24734d9dbbd3b9062c2459936c336278fa6a Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Fri, 28 Mar 2014 22:59:43 +0100 Subject: HSI: Add common DT binding for HSI client devices Implement and document generic DT bindings for HSI clients. Signed-off-by: Sebastian Reichel Reviewed-by: Pavel Machek Tested-By: Ivaylo Dimitrov --- .../devicetree/bindings/hsi/client-devices.txt | 44 +++++ drivers/hsi/hsi.c | 208 ++++++++++++++++++++- include/linux/hsi/hsi.h | 11 ++ 3 files changed, 261 insertions(+), 2 deletions(-) create mode 100644 Documentation/devicetree/bindings/hsi/client-devices.txt (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/hsi/client-devices.txt b/Documentation/devicetree/bindings/hsi/client-devices.txt new file mode 100644 index 000000000000..104c9a3e57a4 --- /dev/null +++ b/Documentation/devicetree/bindings/hsi/client-devices.txt @@ -0,0 +1,44 @@ +Each HSI port is supposed to have one child node, which +symbols the remote device connected to the HSI port. 
The +following properties are standardized for HSI clients: + +Required HSI configuration properties: + +- hsi-channel-ids: A list of channel ids + +- hsi-rx-mode: Receiver Bit transmission mode ("stream" or "frame") +- hsi-tx-mode: Transmitter Bit transmission mode ("stream" or "frame") +- hsi-mode: May be used instead hsi-rx-mode and hsi-tx-mode if + the transmission mode is the same for receiver and + transmitter +- hsi-speed-kbps: Max bit transmission speed in kbit/s +- hsi-flow: RX flow type ("synchronized" or "pipeline") +- hsi-arb-mode: Arbitration mode for TX frame ("round-robin", "priority") + +Optional HSI configuration properties: + +- hsi-channel-names: A list with one name per channel specified in the + hsi-channel-ids property + + +Device Tree node example for an HSI client: + +hsi-controller { + hsi-port { + modem: hsi-client { + compatible = "nokia,n900-modem"; + + hsi-channel-ids = <0>, <1>, <2>, <3>; + hsi-channel-names = "mcsaab-control", + "speech-control", + "speech-data", + "mcsaab-data"; + hsi-speed-kbps = <55000>; + hsi-mode = "frame"; + hsi-flow = "synchronized"; + hsi-arb-mode = "round-robin"; + + /* more client specific properties */ + }; + }; +}; diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 834a2d6b444e..fe9371271ce2 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include "hsi_core.h" static ssize_t modalias_show(struct device *dev, @@ -50,7 +52,13 @@ static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) static int hsi_bus_match(struct device *dev, struct device_driver *driver) { - return strcmp(dev_name(dev), driver->name) == 0; + if (of_driver_match_device(dev, driver)) + return true; + + if (strcmp(dev_name(dev), driver->name) == 0) + return true; + + return false; } static struct bus_type hsi_bus_type = { @@ -123,6 +131,202 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) } } +#ifdef CONFIG_OF +static struct hsi_board_info hsi_char_dev_info = { + .name = "hsi_char", +}; + +static int hsi_of_property_parse_mode(struct device_node *client, char *name, + unsigned int *result) +{ + const char *mode; + int err; + + err = of_property_read_string(client, name, &mode); + if (err < 0) + return err; + + if (strcmp(mode, "stream") == 0) + *result = HSI_MODE_STREAM; + else if (strcmp(mode, "frame") == 0) + *result = HSI_MODE_FRAME; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_flow(struct device_node *client, char *name, + unsigned int *result) +{ + const char *flow; + int err; + + err = of_property_read_string(client, name, &flow); + if (err < 0) + return err; + + if (strcmp(flow, "synchronized") == 0) + *result = HSI_FLOW_SYNC; + else if (strcmp(flow, "pipeline") == 0) + *result = HSI_FLOW_PIPE; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_arb_mode(struct device_node *client, + char *name, unsigned int *result) +{ + const char *arb_mode; + int err; + + err = of_property_read_string(client, name, &arb_mode); + if (err < 0) + return err; + + if (strcmp(arb_mode, "round-robin") == 0) + *result = HSI_ARB_RR; + else if (strcmp(arb_mode, "priority") == 0) + *result = HSI_ARB_PRIO; + else + return -EINVAL; + + return 0; +} + +static void hsi_add_client_from_dt(struct hsi_port *port, + struct device_node *client) +{ + struct hsi_client *cl; + struct hsi_channel channel; + struct property *prop; + char name[32]; + int length, cells, err, i, max_chan, mode; + + cl = kzalloc(sizeof(*cl), 
GFP_KERNEL); + if (!cl) + return; + + err = of_modalias_node(client, name, sizeof(name)); + if (err) + goto err; + + dev_set_name(&cl->device, "%s", name); + + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); + if (err) { + err = hsi_of_property_parse_mode(client, "hsi-rx-mode", + &cl->rx_cfg.mode); + if (err) + goto err; + + err = hsi_of_property_parse_mode(client, "hsi-tx-mode", + &cl->tx_cfg.mode); + if (err) + goto err; + } else { + cl->rx_cfg.mode = mode; + cl->tx_cfg.mode = mode; + } + + err = of_property_read_u32(client, "hsi-speed-kbps", + &cl->tx_cfg.speed); + if (err) + goto err; + cl->rx_cfg.speed = cl->tx_cfg.speed; + + err = hsi_of_property_parse_flow(client, "hsi-flow", + &cl->rx_cfg.flow); + if (err) + goto err; + + err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode", + &cl->rx_cfg.arb_mode); + if (err) + goto err; + + prop = of_find_property(client, "hsi-channel-ids", &length); + if (!prop) { + err = -EINVAL; + goto err; + } + + cells = length / sizeof(u32); + + cl->rx_cfg.num_channels = cells; + cl->tx_cfg.num_channels = cells; + + cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->rx_cfg.channels) { + err = -ENOMEM; + goto err; + } + + cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->tx_cfg.channels) { + err = -ENOMEM; + goto err2; + } + + max_chan = 0; + for (i = 0; i < cells; i++) { + err = of_property_read_u32_index(client, "hsi-channel-ids", i, + &channel.id); + if (err) + goto err3; + + err = of_property_read_string_index(client, "hsi-channel-names", + i, &channel.name); + if (err) + channel.name = NULL; + + if (channel.id > max_chan) + max_chan = channel.id; + + cl->rx_cfg.channels[i] = channel; + cl->tx_cfg.channels[i] = channel; + } + + cl->rx_cfg.num_hw_channels = max_chan + 1; + cl->tx_cfg.num_hw_channels = max_chan + 1; + + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + cl->device.of_node = client; + + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", name); + put_device(&cl->device); + goto err3; + } + + return; + +err3: + kfree(cl->tx_cfg.channels); +err2: + kfree(cl->rx_cfg.channels); +err: + kfree(cl); + pr_err("hsi client: missing or incorrect of property: err=%d\n", err); +} + +void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) +{ + struct device_node *child; + + /* register hsi-char device */ + hsi_new_client(port, &hsi_char_dev_info); + + for_each_available_child_of_node(clients, child) + hsi_add_client_from_dt(port, child); +} +EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt); +#endif + int hsi_remove_client(struct device *dev, void *data __maybe_unused) { device_unregister(dev); @@ -505,7 +709,7 @@ int hsi_unregister_port_event(struct hsi_client *cl) EXPORT_SYMBOL_GPL(hsi_unregister_port_event); /** - * hsi_event -Notifies clients about port events + * hsi_event - Notifies clients about port events * @port: Port where the event occurred * @event: The event type * diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index e20a3999a696..3ec06300d535 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -301,6 +301,17 @@ struct hsi_client *hsi_new_client(struct hsi_port *port, int hsi_remove_client(struct device *dev, void *data); void hsi_port_unregister_clients(struct hsi_port *port); +#ifdef CONFIG_OF +void hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients); +#else +static inline void 
hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients) +{ + return; +} +#endif + static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, void *data) { -- cgit v1.2.3 From dc7bf5d7186849aa36b9f0e42e250a813a7b0bdb Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Fri, 15 Nov 2013 10:50:32 +0000 Subject: HSI: Introduce driver for SSI Protocol This adds a driver for the SSI McSAAB protocol as used in the Nokia N900. Signed-off-by: Carlos Chinea Signed-off-by: Sebastian Reichel Tested-By: Ivaylo Dimitrov --- drivers/hsi/clients/Kconfig | 8 + drivers/hsi/clients/Makefile | 3 +- drivers/hsi/clients/ssi_protocol.c | 1191 ++++++++++++++++++++++++++++++++++++ include/linux/hsi/ssi_protocol.h | 42 ++ 4 files changed, 1243 insertions(+), 1 deletion(-) create mode 100644 drivers/hsi/clients/ssi_protocol.c create mode 100644 include/linux/hsi/ssi_protocol.h (limited to 'include/linux') diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig index 3bacd275f479..1457cfb5b453 100644 --- a/drivers/hsi/clients/Kconfig +++ b/drivers/hsi/clients/Kconfig @@ -4,6 +4,14 @@ comment "HSI clients" +config SSI_PROTOCOL + tristate "SSI protocol" + depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m) + help + If you say Y here, you will enable the SSI protocol aka McSAAB. + + If unsure, say N. + config HSI_CHAR tristate "HSI/SSI character driver" depends on HSI diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile index 327c0e27c8b0..ccbf768ea42b 100644 --- a/drivers/hsi/clients/Makefile +++ b/drivers/hsi/clients/Makefile @@ -2,4 +2,5 @@ # Makefile for HSI clients # -obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o +obj-$(CONFIG_HSI_CHAR) += hsi_char.o diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c new file mode 100644 index 000000000000..ce4be3738d46 --- /dev/null +++ b/drivers/hsi/clients/ssi_protocol.c @@ -0,0 +1,1191 @@ +/* + * ssi_protocol.c + * + * Implementation of the SSI McSAAB improved protocol. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2013 Sebastian Reichel + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void ssi_waketest(struct hsi_client *cl, unsigned int enable); + +#define SSIP_TXQUEUE_LEN 100 +#define SSIP_MAX_MTU 65535 +#define SSIP_DEFAULT_MTU 4000 +#define PN_MEDIA_SOS 21 +#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ +#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ +#define SSIP_KATOUT 15 /* 15 msecs */ +#define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */ +#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) +#define SSIP_CMT_LOADER_SYNC 0x11223344 +/* + * SSI protocol command definitions + */ +#define SSIP_COMMAND(data) ((data) >> 28) +#define SSIP_PAYLOAD(data) ((data) & 0xfffffff) +/* Commands */ +#define SSIP_SW_BREAK 0 +#define SSIP_BOOTINFO_REQ 1 +#define SSIP_BOOTINFO_RESP 2 +#define SSIP_WAKETEST_RESULT 3 +#define SSIP_START_TRANS 4 +#define SSIP_READY 5 +/* Payloads */ +#define SSIP_DATA_VERSION(data) ((data) & 0xff) +#define SSIP_LOCAL_VERID 1 +#define SSIP_WAKETEST_OK 0 +#define SSIP_WAKETEST_FAILED 1 +#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) +#define SSIP_MSG_ID(data) ((data) & 0xff) +/* Generic Command */ +#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff)) +/* Commands for the control channel */ +#define SSIP_BOOTINFO_REQ_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver)) +#define SSIP_BOOTINFO_RESP_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver)) +#define SSIP_START_TRANS_CMD(pdulen, id) \ + SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id))) +#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) +#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) + +/* Main state machine states */ +enum { + INIT, + HANDSHAKE, + ACTIVE, +}; + +/* Send state machine states */ +enum { + SEND_IDLE, + WAIT4READY, + SEND_READY, + SENDING, + SENDING_SWBREAK, +}; + +/* Receive state machine states */ +enum { + RECV_IDLE, + RECV_READY, + RECEIVING, +}; + +/** + * struct ssi_protocol - SSI protocol (McSAAB) data + * @main_state: Main state machine + * @send_state: TX state machine + * @recv_state: RX state machine + * @waketest: Flag to follow wake line test + * @rxid: RX data id + * @txid: TX data id + * @txqueue_len: TX queue length + * @tx_wd: TX watchdog + * @rx_wd: RX watchdog + * @keep_alive: Workaround for SSI HW bug + * @lock: To serialize access to this struct + * @netdev: Phonet network device + * @txqueue: TX data queue + * @cmdqueue: Queue of free commands + * @cl: HSI client own reference + * @link: Link for ssip_list + * @tx_usecount: Refcount to keep track the slaves that use the wake line + * @channel_id_cmd: HSI channel id for command stream + * @channel_id_data: HSI channel id for data stream + */ +struct ssi_protocol { + unsigned int main_state; + unsigned int send_state; + unsigned int recv_state; + unsigned int waketest:1; + u8 rxid; + u8 txid; + unsigned int txqueue_len; + struct timer_list tx_wd; + struct timer_list rx_wd; + struct timer_list keep_alive; /* wake-up workaround */ + spinlock_t lock; + struct net_device *netdev; + struct list_head txqueue; + struct list_head cmdqueue; + struct hsi_client *cl; + struct list_head link; + 
atomic_t tx_usecnt; + int channel_id_cmd; + int channel_id_data; +}; + +/* List of ssi protocol instances */ +static LIST_HEAD(ssip_list); + +static void ssip_rxcmd_complete(struct hsi_msg *msg); + +static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + *data = cmd; +} + +static inline u32 ssip_get_cmd(struct hsi_msg *msg) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + + return *data; +} + +static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) +{ + skb_frag_t *frag; + struct scatterlist *sg; + int i; + + BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); + + sg = msg->sgt.sgl; + sg_set_buf(sg, skb->data, skb_headlen(skb)); + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg = sg_next(sg); + BUG_ON(!sg); + frag = &skb_shinfo(skb)->frags[i]; + sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); + } +} + +static void ssip_free_data(struct hsi_msg *msg) +{ + struct sk_buff *skb; + + skb = msg->context; + pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, + skb); + msg->destructor = NULL; + dev_kfree_skb(skb); + hsi_free_msg(msg); +} + +static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi, + struct sk_buff *skb, gfp_t flags) +{ + struct hsi_msg *msg; + + msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); + if (!msg) + return NULL; + ssip_skb_to_msg(skb, msg); + msg->destructor = ssip_free_data; + msg->channel = ssi->channel_id_data; + msg->context = skb; + + return msg; +} + +static inline void ssip_release_cmd(struct hsi_msg *msg) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); + + dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); + spin_lock_bh(&ssi->lock); + list_add_tail(&msg->link, &ssi->cmdqueue); + spin_unlock_bh(&ssi->lock); +} + +static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + + BUG_ON(list_empty(&ssi->cmdqueue)); + + spin_lock_bh(&ssi->lock); + msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_bh(&ssi->lock); + msg->destructor = ssip_release_cmd; + + return msg; +} + +static void ssip_free_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { + list_del(&msg->link); + msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); + } +} + +static int ssip_alloc_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + u32 *buf; + unsigned int i; + + for (i = 0; i < SSIP_MAX_CMDS; i++) { + msg = hsi_alloc_msg(1, GFP_KERNEL); + if (!msg) + goto out; + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); + msg->channel = ssi->channel_id_cmd; + list_add_tail(&msg->link, &ssi->cmdqueue); + } + + return 0; +out: + ssip_free_cmds(ssi); + + return -ENOMEM; +} + +static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->recv_state = state; + switch (state) { + case RECV_IDLE: + del_timer(&ssi->rx_wd); + if (ssi->send_state == SEND_IDLE) + del_timer(&ssi->keep_alive); + break; + case RECV_READY: + /* CMT speech workaround */ + if (atomic_read(&ssi->tx_usecnt)) + break; + /* Otherwise fall through */ + case RECEIVING: + mod_timer(&ssi->keep_alive, jiffies + + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +static void 
ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->send_state = state; + switch (state) { + case SEND_IDLE: + case SEND_READY: + del_timer(&ssi->tx_wd); + if (ssi->recv_state == RECV_IDLE) + del_timer(&ssi->keep_alive); + break; + case WAIT4READY: + case SENDING: + case SENDING_SWBREAK: + mod_timer(&ssi->keep_alive, + jiffies + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) +{ + struct hsi_client *master = ERR_PTR(-ENODEV); + struct ssi_protocol *ssi; + + list_for_each_entry(ssi, &ssip_list, link) + if (slave->device.parent == ssi->cl->device.parent) { + master = ssi->cl; + break; + } + + return master; +} +EXPORT_SYMBOL_GPL(ssip_slave_get_master); + +int ssip_slave_start_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); + spin_lock_bh(&ssi->lock); + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + hsi_start_tx(master); + } + spin_unlock_bh(&ssi->lock); + atomic_inc(&ssi->tx_usecnt); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_start_tx); + +int ssip_slave_stop_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); + + if (atomic_dec_and_test(&ssi->tx_usecnt)) { + spin_lock_bh(&ssi->lock); + if ((ssi->send_state == SEND_READY) || + (ssi->send_state == WAIT4READY)) { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(master); + } + spin_unlock_bh(&ssi->lock); + } + dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); + +int ssip_slave_running(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + return netif_running(ssi->netdev); +} +EXPORT_SYMBOL_GPL(ssip_slave_running); + +static void ssip_reset(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct list_head *head, *tmp; + struct hsi_msg *msg; + + if (netif_running(ssi->netdev)) + netif_carrier_off(ssi->netdev); + hsi_flush(cl); + spin_lock_bh(&ssi->lock); + if (ssi->send_state != SEND_IDLE) + hsi_stop_tx(cl); + if (ssi->waketest) + ssi_waketest(cl, 0); + del_timer(&ssi->rx_wd); + del_timer(&ssi->tx_wd); + del_timer(&ssi->keep_alive); + ssi->main_state = 0; + ssi->send_state = 0; + ssi->recv_state = 0; + ssi->waketest = 0; + ssi->rxid = 0; + ssi->txid = 0; + list_for_each_safe(head, tmp, &ssi->txqueue) { + msg = list_entry(head, struct hsi_msg, link); + dev_dbg(&cl->device, "Pending TX data\n"); + list_del(head); + ssip_free_data(msg); + } + ssi->txqueue_len = 0; + spin_unlock_bh(&ssi->lock); +} + +static void ssip_dump_state(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + spin_lock_bh(&ssi->lock); + dev_err(&cl->device, "Main state: %d\n", ssi->main_state); + dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); + dev_err(&cl->device, "Send state: %d\n", ssi->send_state); + dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? 
+ "Online" : "Offline"); + dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); + dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); + + list_for_each_entry(msg, &ssi->txqueue, link) + dev_err(&cl->device, "pending TX data (%p)\n", msg); + spin_unlock_bh(&ssi->lock); +} + +static void ssip_error(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + ssip_dump_state(cl); + ssip_reset(cl); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_keep_alive(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", + ssi->main_state, ssi->recv_state, ssi->send_state); + + spin_lock(&ssi->lock); + if (ssi->recv_state == RECV_IDLE) + switch (ssi->send_state) { + case SEND_READY: + if (atomic_read(&ssi->tx_usecnt) == 0) + break; + /* + * Fall through. Workaround for cmt-speech + * in that case we relay on audio timers. + */ + case SEND_IDLE: + spin_unlock(&ssi->lock); + return; + } + mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); + spin_unlock(&ssi->lock); +} + +static void ssip_wd(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + + dev_err(&cl->device, "Watchdog trigerred\n"); + ssip_error(cl); +} + +static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + dev_dbg(&cl->device, "Issuing RX command\n"); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_start_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, + ssi->recv_state); + spin_lock(&ssi->lock); + /* + * We can have two UP events in a row due to a short low + * high transition. Therefore we need to ignore the sencond UP event. 
+ */ + if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { + if (ssi->main_state == INIT) { + ssi->main_state = HANDSHAKE; + spin_unlock(&ssi->lock); + ssip_send_bootinfo_req_cmd(cl); + } else { + spin_unlock(&ssi->lock); + } + return; + } + ssip_set_rxstate(ssi, RECV_READY); + spin_unlock(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_READY_CMD); + msg->complete = ssip_release_cmd; + dev_dbg(&cl->device, "Send READY\n"); + hsi_async_write(cl, msg); +} + +static void ssip_stop_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); + spin_lock(&ssi->lock); + if (likely(ssi->main_state == ACTIVE)) + ssip_set_rxstate(ssi, RECV_IDLE); + spin_unlock(&ssi->lock); +} + +static void ssip_free_strans(struct hsi_msg *msg) +{ + ssip_free_data(msg->context); + ssip_release_cmd(msg); +} + +static void ssip_strans_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *data; + + data = msg->context; + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + ssip_set_txstate(ssi, SENDING); + spin_unlock(&ssi->lock); + hsi_async_write(cl, data); +} + +static int ssip_xmit(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg, *dmsg; + struct sk_buff *skb; + + spin_lock_bh(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + spin_unlock_bh(&ssi->lock); + return 0; + } + dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); + list_del(&dmsg->link); + ssi->txqueue_len--; + spin_unlock_bh(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + skb = dmsg->context; + msg->context = dmsg; + msg->complete = ssip_strans_complete; + msg->destructor = ssip_free_strans; + + spin_lock_bh(&ssi->lock); + ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), + ssi->txid)); + ssi->txid++; + ssip_set_txstate(ssi, SENDING); + spin_unlock_bh(&ssi->lock); + + dev_dbg(&cl->device, "Send STRANS (%d frames)\n", + SSIP_BYTES_TO_FRAMES(skb->len)); + + return hsi_async_write(cl, msg); +} + +/* In soft IRQ context */ +static void ssip_pn_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + + if (unlikely(!netif_running(dev))) { + dev_dbg(&dev->dev, "Drop RX packet\n"); + dev->stats.rx_dropped++; + dev_kfree_skb(skb); + return; + } + if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { + dev_dbg(&dev->dev, "Error drop RX packet\n"); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb(skb); + return; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); + dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", + ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); + + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + __skb_pull(skb, 1); + netif_rx(skb); +} + +static void ssip_rx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX data error\n"); + ssip_free_data(msg); + ssip_error(cl); + return; + } + del_timer(&ssi->rx_wd); /* FIXME: Revisit */ + skb = msg->context; + ssip_pn_rx(skb); + hsi_free_msg(msg); +} + +static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + 
struct hsi_msg *msg; + + /* Workaroud: Ignore CMT Loader message leftover */ + if (cmd == SSIP_CMT_LOADER_SYNC) + return; + + switch (ssi->main_state) { + case ACTIVE: + dev_err(&cl->device, "Boot info req on active state\n"); + ssip_error(cl); + /* Fall through */ + case INIT: + spin_lock(&ssi->lock); + ssi->main_state = HANDSHAKE; + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + /* Start boot handshake watchdog */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + spin_unlock(&ssi->lock); + dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info req verid mismatch\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + break; + case HANDSHAKE: + /* Ignore */ + break; + default: + dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); + break; + } +} + +static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info resp verid mismatch\n"); + + spin_lock(&ssi->lock); + if (ssi->main_state != ACTIVE) + /* Use tx_wd as a boot watchdog in non ACTIVE state */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + else + dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); +} + +static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + unsigned int wkres = SSIP_PAYLOAD(cmd); + + spin_lock(&ssi->lock); + if (ssi->main_state != HANDSHAKE) { + dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->waketest) { + ssi->waketest = 0; + ssi_waketest(cl, 0); /* FIXME: To be removed */ + } + ssi->main_state = ACTIVE; + del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ + spin_unlock(&ssi->lock); + + dev_notice(&cl->device, "WAKELINES TEST %s\n", + wkres & SSIP_WAKETEST_FAILED ? 
"FAILED" : "OK"); + if (wkres & SSIP_WAKETEST_FAILED) { + ssip_error(cl); + return; + } + dev_dbg(&cl->device, "CMT is ONLINE\n"); + netif_wake_queue(ssi->netdev); + netif_carrier_on(ssi->netdev); +} + +static void ssip_rx_ready(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->send_state != WAIT4READY) { + dev_dbg(&cl->device, "Ignore spurious READY command\n"); + spin_unlock(&ssi->lock); + return; + } + ssip_set_txstate(ssi, SEND_READY); + spin_unlock(&ssi->lock); + ssip_xmit(cl); +} + +static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + struct hsi_msg *msg; + int len = SSIP_PDU_LENGTH(cmd); + + dev_dbg(&cl->device, "RX strans: %d frames\n", len); + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + ssip_set_rxstate(ssi, RECEIVING); + if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { + dev_err(&cl->device, "START TRANS id %d expeceted %d\n", + SSIP_MSG_ID(cmd), ssi->rxid); + spin_unlock(&ssi->lock); + goto out1; + } + ssi->rxid++; + spin_unlock(&ssi->lock); + skb = netdev_alloc_skb(ssi->netdev, len * 4); + if (unlikely(!skb)) { + dev_err(&cl->device, "No memory for rx skb\n"); + goto out1; + } + skb->dev = ssi->netdev; + skb_put(skb, len * 4); + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (unlikely(!msg)) { + dev_err(&cl->device, "No memory for RX data msg\n"); + goto out2; + } + msg->complete = ssip_rx_data_complete; + hsi_async_read(cl, msg); + + return; +out2: + dev_kfree_skb(skb); +out1: + ssip_error(cl); +} + +static void ssip_rxcmd_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + u32 cmd = ssip_get_cmd(msg); + unsigned int cmdid = SSIP_COMMAND(cmd); + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX error detected\n"); + ssip_release_cmd(msg); + ssip_error(cl); + return; + } + hsi_async_read(cl, msg); + dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); + switch (cmdid) { + case SSIP_SW_BREAK: + /* Ignored */ + break; + case SSIP_BOOTINFO_REQ: + ssip_rx_bootinforeq(cl, cmd); + break; + case SSIP_BOOTINFO_RESP: + ssip_rx_bootinforesp(cl, cmd); + break; + case SSIP_WAKETEST_RESULT: + ssip_rx_waketest(cl, cmd); + break; + case SSIP_START_TRANS: + ssip_rx_strans(cl, cmd); + break; + case SSIP_READY: + ssip_rx_ready(cl); + break; + default: + dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); + break; + } +} + +static void ssip_swbreak_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + if (atomic_read(&ssi->tx_usecnt)) { + ssip_set_txstate(ssi, SEND_READY); + } else { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(cl); + } + spin_unlock(&ssi->lock); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } + netif_wake_queue(ssi->netdev); +} + +static void ssip_tx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *cmsg; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "TX data 
error\n"); + ssip_error(cl); + goto out; + } + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + ssip_set_txstate(ssi, SENDING_SWBREAK); + spin_unlock(&ssi->lock); + cmsg = ssip_claim_cmd(ssi); + ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); + cmsg->complete = ssip_swbreak_complete; + dev_dbg(&cl->device, "Send SWBREAK\n"); + hsi_async_write(cl, cmsg); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } +out: + ssip_free_data(msg); +} + +void ssip_port_event(struct hsi_client *cl, unsigned long event) +{ + switch (event) { + case HSI_EVENT_START_RX: + ssip_start_rx(cl); + break; + case HSI_EVENT_STOP_RX: + ssip_stop_rx(cl); + break; + default: + return; + } +} + +static int ssip_pn_open(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + int err; + + err = hsi_claim_port(cl, 1); + if (err < 0) { + dev_err(&cl->device, "SSI port already claimed\n"); + return err; + } + err = hsi_register_port_event(cl, ssip_port_event); + if (err < 0) { + dev_err(&cl->device, "Register HSI port event failed (%d)\n", + err); + return err; + } + dev_dbg(&cl->device, "Configuring SSI port\n"); + hsi_setup(cl); + spin_lock_bh(&ssi->lock); + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + ssi->main_state = INIT; + spin_unlock_bh(&ssi->lock); + + return 0; +} + +static int ssip_pn_stop(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + + ssip_reset(cl); + hsi_unregister_port_event(cl); + hsi_release_port(cl); + + return 0; +} + +static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU) + return -EINVAL; + dev->mtu = new_mtu; + + return 0; +} + +static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + if ((skb->protocol != htons(ETH_P_PHONET)) || + (skb->len < SSIP_MIN_PN_HDR)) + goto drop; + /* Pad to 32-bits - FIXME: Revisit*/ + if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) + goto drop; + + /* + * Modem sends Phonet messages over SSI with its own endianess... + * Assume that modem has the same endianess as we do. 
+ */ + if (skb_cow_head(skb, 0)) + goto drop; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); + + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (!msg) { + dev_dbg(&cl->device, "Dropping tx data: No memory\n"); + goto drop; + } + msg->complete = ssip_tx_data_complete; + + spin_lock_bh(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); + goto drop2; + } + list_add_tail(&msg->link, &ssi->txqueue); + ssi->txqueue_len++; + if (dev->tx_queue_len < ssi->txqueue_len) { + dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); + netif_stop_queue(dev); + } + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); + hsi_start_tx(cl); + } else if (ssi->send_state == SEND_READY) { + /* Needed for cmt-speech workaround */ + dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", + ssi->txqueue_len); + spin_unlock_bh(&ssi->lock); + ssip_xmit(cl); + } else { + spin_unlock_bh(&ssi->lock); + } + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return 0; +drop2: + hsi_free_msg(msg); +drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + + return 0; +} + +/* CMT reset event handler */ +void ssip_reset_event(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + dev_err(&ssi->cl->device, "CMT reset detected!\n"); + ssip_error(ssi->cl); +} +EXPORT_SYMBOL_GPL(ssip_reset_event); + +static const struct net_device_ops ssip_pn_ops = { + .ndo_open = ssip_pn_open, + .ndo_stop = ssip_pn_stop, + .ndo_start_xmit = ssip_pn_xmit, + .ndo_change_mtu = ssip_pn_set_mtu, +}; + +static void ssip_pn_setup(struct net_device *dev) +{ + dev->features = 0; + dev->netdev_ops = &ssip_pn_ops; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = SSIP_DEFAULT_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_SOS; + dev->addr_len = 1; + dev->tx_queue_len = SSIP_TXQUEUE_LEN; + + dev->destructor = free_netdev; + dev->header_ops = &phonet_header_ops; +} + +static int ssi_protocol_probe(struct device *dev) +{ + static const char ifname[] = "phonet%d"; + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi; + int err; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (!ssi) { + dev_err(dev, "No memory for ssi protocol\n"); + return -ENOMEM; + } + + spin_lock_init(&ssi->lock); + init_timer_deferrable(&ssi->rx_wd); + init_timer_deferrable(&ssi->tx_wd); + init_timer(&ssi->keep_alive); + ssi->rx_wd.data = (unsigned long)cl; + ssi->rx_wd.function = ssip_wd; + ssi->tx_wd.data = (unsigned long)cl; + ssi->tx_wd.function = ssip_wd; + ssi->keep_alive.data = (unsigned long)cl; + ssi->keep_alive.function = ssip_keep_alive; + INIT_LIST_HEAD(&ssi->txqueue); + INIT_LIST_HEAD(&ssi->cmdqueue); + atomic_set(&ssi->tx_usecnt, 0); + hsi_client_set_drvdata(cl, ssi); + ssi->cl = cl; + + ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); + if (ssi->channel_id_cmd < 0) { + err = ssi->channel_id_cmd; + dev_err(dev, "Could not get cmd channel (%d)\n", err); + goto out; + } + + ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); + if (ssi->channel_id_data < 0) { + err = ssi->channel_id_data; + dev_err(dev, "Could not get data channel (%d)\n", err); + goto out; + } + + err = ssip_alloc_cmds(ssi); + if (err < 0) { + dev_err(dev, "No memory 
for commands\n"); + goto out; + } + + ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup); + if (!ssi->netdev) { + dev_err(dev, "No memory for netdev\n"); + err = -ENOMEM; + goto out1; + } + + SET_NETDEV_DEV(ssi->netdev, dev); + netif_carrier_off(ssi->netdev); + err = register_netdev(ssi->netdev); + if (err < 0) { + dev_err(dev, "Register netdev failed (%d)\n", err); + goto out2; + } + + list_add(&ssi->link, &ssip_list); + + dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n", + ssi->channel_id_cmd, ssi->channel_id_data); + + return 0; +out2: + free_netdev(ssi->netdev); +out1: + ssip_free_cmds(ssi); +out: + kfree(ssi); + + return err; +} + +static int ssi_protocol_remove(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + list_del(&ssi->link); + unregister_netdev(ssi->netdev); + ssip_free_cmds(ssi); + hsi_client_set_drvdata(cl, NULL); + kfree(ssi); + + return 0; +} + +static struct hsi_client_driver ssip_driver = { + .driver = { + .name = "ssi-protocol", + .owner = THIS_MODULE, + .probe = ssi_protocol_probe, + .remove = ssi_protocol_remove, + }, +}; + +static int __init ssip_init(void) +{ + pr_info("SSI protocol aka McSAAB added\n"); + + return hsi_register_client_driver(&ssip_driver); +} +module_init(ssip_init); + +static void __exit ssip_exit(void) +{ + hsi_unregister_client_driver(&ssip_driver); + pr_info("SSI protocol driver removed\n"); +} +module_exit(ssip_exit); + +MODULE_ALIAS("hsi:ssi-protocol"); +MODULE_AUTHOR("Carlos Chinea "); +MODULE_AUTHOR("Remi Denis-Courmont "); +MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h new file mode 100644 index 000000000000..1433651be0dc --- /dev/null +++ b/include/linux/hsi/ssi_protocol.h @@ -0,0 +1,42 @@ +/* + * ssip_slave.h + * + * SSIP slave support header file + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_SSIP_SLAVE_H__ +#define __LINUX_SSIP_SLAVE_H__ + +#include + +static inline void ssip_slave_put_master(struct hsi_client *master) +{ +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave); +int ssip_slave_start_tx(struct hsi_client *master); +int ssip_slave_stop_tx(struct hsi_client *master); +void ssip_reset_event(struct hsi_client *master); + +int ssip_slave_running(struct hsi_client *master); + +#endif /* __LINUX_SSIP_SLAVE_H__ */ + -- cgit v1.2.3 From 1f0b63866fc1be700260547be8edf8e6f0af37f2 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Thu, 15 May 2014 23:29:57 +0200 Subject: ACPI / PM: Hold ACPI scan lock over the "freeze" sleep state The "freeze" sleep state suffers from the same issue that was addressed by commit ad07277e82de (ACPI / PM: Hold acpi_scan_lock over system PM transitions) for ACPI sleep states, that is, things break if ->remove() is called for devices whose system resume callbacks haven't been executed yet. It also can be addressed in the same way, by holding the ACPI scan lock over the "freeze" sleep state and PM transitions to and from that state, but ->begin() and ->end() platform operations for the "freeze" sleep state are needed for this purpose. This change has been tested on Acer Aspire S5 with Thunderbolt. Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 18 ++++++++++++++++++ include/linux/suspend.h | 7 +++++++ kernel/power/suspend.c | 15 +++++++++++++++ 3 files changed, 40 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2281ca31c1bc..c11e3795431b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -612,6 +612,22 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { .recover = acpi_pm_finish, }; +static int acpi_freeze_begin(void) +{ + acpi_scan_lock_acquire(); + return 0; +} + +static void acpi_freeze_end(void) +{ + acpi_scan_lock_release(); +} + +static const struct platform_freeze_ops acpi_freeze_ops = { + .begin = acpi_freeze_begin, + .end = acpi_freeze_end, +}; + static void acpi_sleep_suspend_setup(void) { int i; @@ -622,7 +638,9 @@ static void acpi_sleep_suspend_setup(void) suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); + freeze_set_ops(&acpi_freeze_ops); } + #else /* !CONFIG_SUSPEND */ static inline void acpi_sleep_suspend_setup(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f73cabf59012..91d66fd8dce1 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -187,6 +187,11 @@ struct platform_suspend_ops { void (*recover)(void); }; +struct platform_freeze_ops { + int (*begin)(void); + void (*end)(void); +}; + #ifdef CONFIG_SUSPEND /** * suspend_set_ops - set platform dependent suspend operations @@ -194,6 +199,7 @@ struct platform_suspend_ops { */ extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); +extern void freeze_set_ops(const struct platform_freeze_ops *ops); extern void freeze_wake(void); /** @@ -220,6 +226,7 @@ extern int pm_suspend(suspend_state_t state); static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } +static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8233cd4047d7..73a905f83972 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -38,6 +38,7 @@ const char *const pm_states[PM_SUSPEND_MAX] = { }; static const struct platform_suspend_ops *suspend_ops; +static const struct platform_freeze_ops *freeze_ops; static bool need_suspend_ops(suspend_state_t state) { @@ -47,6 +48,13 @@ static bool need_suspend_ops(suspend_state_t state) static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); static bool suspend_freeze_wake; +void freeze_set_ops(const struct platform_freeze_ops *ops) +{ + lock_system_sleep(); + freeze_ops = ops; + 
unlock_system_sleep(); +} + static void freeze_begin(void) { suspend_freeze_wake = false; @@ -269,6 +277,10 @@ int suspend_devices_and_enter(suspend_state_t state) error = suspend_ops->begin(state); if (error) goto Close; + } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { + error = freeze_ops->begin(); + if (error) + goto Close; } suspend_console(); suspend_test_start(); @@ -294,6 +306,9 @@ int suspend_devices_and_enter(suspend_state_t state) Close: if (need_suspend_ops(state) && suspend_ops->end) suspend_ops->end(); + else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) + freeze_ops->end(); + trace_machine_suspend(PWR_EVENT_EXIT); return error; -- cgit v1.2.3 From 7b6ef1262549f6afc5c881aaef80beb8fd15f908 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:05 +0000 Subject: genirq: Provide generic hwirq allocation facility Not really the solution to the problem, but at least it confines the mess in the core code and allows us to get rid of the create/destroy_irq variants from hell, i.e. 3 implementations with different semantics plus the x86 specific variants __create_irqs and create_irq_nr which have been invented in another circle of hell. x86: x86 should be converted to irq domains and I'm deliberately making it impossible to do the multi-vector MSI support by adding more crap to the current mess. It's not that hard to do and I'm really tired of the trainwrecks which have been invented by band-aid engineering so far. Any attempt to do multi-vector MSI or ioapic hotplug without converting to irq domains is NAKed hereby. tile: Might use irq domains as well, but it has a very limited interrupt space, so handling it via this functionality might be the right thing to do even in the long run. ia64: That's a hopeless case, as I doubt that anyone has the stomach to rewrite the homebrewed dynamic allocation facilities. I stared at it for a couple of hours and gave up. The create/destroy_irq mess could be made private to itanic right away if it weren't for the iommu/dmar driver being shared with x86. So to do that I'm going to add a separate ia64 specific implementation later in order not to deep-six itanic right away.
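As a rough usage sketch (not part of this patch; the function and variable names below are hypothetical), an architecture that selects GENERIC_IRQ_LEGACY_ALLOC_HWIRQ and provides arch_setup_hwirq()/arch_teardown_hwirq() would allocate and release hardware interrupts roughly like this:

#include <linux/irq.h>

/* Hypothetical arch-side caller, shown only to illustrate the intended usage. */
static unsigned int example_irq;

static int example_alloc_vector(int node)
{
	unsigned int irq = irq_alloc_hwirq(node);	/* returns 0 on failure */

	if (!irq)
		return -ENOSPC;

	example_irq = irq;
	return 0;
}

static void example_free_vector(void)
{
	if (example_irq) {
		irq_free_hwirq(example_irq);
		example_irq = 0;
	}
}

irq_alloc_hwirq() is the single-interrupt wrapper around irq_alloc_hwirqs(); note that both return 0 rather than a negative errno when the allocation fails, which is why the sketch tests for 0.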
Signed-off-by: Thomas Gleixner Reviewed-by: Grant Likely Cc: Tony Luck Cc: Peter Zijlstra Cc: Chris Metcalf Cc: Fenghua Yu Cc: x86@kernel.org Link: http://lkml.kernel.org/r/20140507154334.208629358@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/irq.h | 15 +++++++++++++++ kernel/irq/Kconfig | 5 +++++ kernel/irq/irqdesc.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 5c57efb863d0..c75dd161d37f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -637,6 +637,21 @@ static inline int irq_reserve_irq(unsigned int irq) return irq_reserve_irqs(irq, 1); } +#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ +unsigned int irq_alloc_hwirqs(int cnt, int node); +static inline unsigned int irq_alloc_hwirq(int node) +{ + return irq_alloc_hwirqs(1, node); +} +void irq_free_hwirqs(unsigned int from, int cnt); +static inline void irq_free_hwirq(unsigned int irq) +{ + return irq_free_hwirqs(irq, 1); +} +int arch_setup_hwirq(unsigned int irq, int node); +void arch_teardown_hwirq(unsigned int irq); +#endif + #ifndef irq_reg_writel # define irq_reg_writel(val, addr) writel(val, addr) #endif diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 07cbdfea9ae2..a83f10e406c1 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -17,6 +17,11 @@ config GENERIC_IRQ_SHOW config GENERIC_IRQ_SHOW_LEVEL bool +# Facility to allocate a hardware interrupt. This is legacy support +# and should not be used in new code. Use irq domains instead. +config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ + bool + # Support for delayed migration from interrupt context config GENERIC_PENDING_IRQ bool diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index bb07f2928f4b..f388ade5e792 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -396,6 +396,57 @@ err: } EXPORT_SYMBOL_GPL(__irq_alloc_descs); +#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ +/** + * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware + * @cnt: number of interrupts to allocate + * @node: node on which to allocate + * + * Returns an interrupt number > 0 or 0, if the allocation fails. + */ +unsigned int irq_alloc_hwirqs(int cnt, int node) +{ + int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); + + if (irq < 0) + return 0; + + for (i = irq; cnt > 0; i++, cnt--) { + if (arch_setup_hwirq(i, node)) + goto err; + irq_clear_status_flags(i, _IRQ_NOREQUEST); + } + return irq; + +err: + for (i--; i >= irq; i--) { + irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); + arch_teardown_hwirq(i); + } + irq_free_descs(irq, cnt); + return 0; +} +EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); + +/** + * irq_free_hwirqs - Free irq descriptor and cleanup the hardware + * @from: Free from irq number + * @cnt: number of interrupts to free + * + */ +void irq_free_hwirqs(unsigned int from, int cnt) +{ + int i; + + for (i = from; cnt > 0; i++, cnt--) { + irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); + arch_teardown_hwirq(i); + } + irq_free_descs(from, cnt); +} +EXPORT_SYMBOL_GPL(irq_free_hwirqs); +#endif + /** * irq_reserve_irqs - mark irqs allocated * @from: mark from irq number -- cgit v1.2.3 From 54859f59fc18e5c104a4095420b3fcef8bc3ae63 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:12 +0000 Subject: x86: Remove create/destroy_irq() No more users. 
Remove the cruft Signed-off-by: Thomas Gleixner Reviewed-by: Grant Likely Cc: Tony Luck Cc: Peter Zijlstra Cc: x86@kernel.org Link: http://lkml.kernel.org/r/20140507154336.760446122@linutronix.de Signed-off-by: Thomas Gleixner --- arch/x86/kernel/apic/io_apic.c | 106 +---------------------------------------- include/linux/irq.h | 4 -- 2 files changed, 1 insertion(+), 109 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index be3b5741badb..efda2f648f59 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -281,18 +281,6 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) return cfg; } -static int alloc_irqs_from(unsigned int from, unsigned int count, int node) -{ - return irq_alloc_descs_from(from, count, node); -} - -static void free_irq_at(unsigned int at, struct irq_cfg *cfg) -{ - free_irq_cfg(at, cfg); - irq_free_desc(at); -} - - struct io_apic { unsigned int index; unsigned int unused[3]; @@ -2916,100 +2904,8 @@ static int __init ioapic_init_ops(void) device_initcall(ioapic_init_ops); /* - * Dynamic irq allocate and deallocation + * Dynamic irq allocate and deallocation. Should be replaced by irq domains! */ -unsigned int __create_irqs(unsigned int from, unsigned int count, int node) -{ - struct irq_cfg **cfg; - unsigned long flags; - int irq, i; - - if (from < nr_irqs_gsi) - from = nr_irqs_gsi; - - cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); - if (!cfg) - return 0; - - irq = alloc_irqs_from(from, count, node); - if (irq < 0) - goto out_cfgs; - - for (i = 0; i < count; i++) { - cfg[i] = alloc_irq_cfg(irq + i, node); - if (!cfg[i]) - goto out_irqs; - } - - raw_spin_lock_irqsave(&vector_lock, flags); - for (i = 0; i < count; i++) - if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) - goto out_vecs; - raw_spin_unlock_irqrestore(&vector_lock, flags); - - for (i = 0; i < count; i++) { - irq_set_chip_data(irq + i, cfg[i]); - irq_clear_status_flags(irq + i, IRQ_NOREQUEST); - } - - kfree(cfg); - return irq; - -out_vecs: - for (i--; i >= 0; i--) - __clear_irq_vector(irq + i, cfg[i]); - raw_spin_unlock_irqrestore(&vector_lock, flags); -out_irqs: - for (i = 0; i < count; i++) - free_irq_at(irq + i, cfg[i]); -out_cfgs: - kfree(cfg); - return 0; -} - -unsigned int create_irq_nr(unsigned int from, int node) -{ - return __create_irqs(from, 1, node); -} - -int create_irq(void) -{ - int node = cpu_to_node(0); - unsigned int irq_want; - int irq; - - irq_want = nr_irqs_gsi; - irq = create_irq_nr(irq_want, node); - - if (irq == 0) - irq = -1; - - return irq; -} - -void destroy_irq(unsigned int irq) -{ - struct irq_cfg *cfg = irq_get_chip_data(irq); - unsigned long flags; - - irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); - - free_remapped_irq(irq); - - raw_spin_lock_irqsave(&vector_lock, flags); - __clear_irq_vector(irq, cfg); - raw_spin_unlock_irqrestore(&vector_lock, flags); - free_irq_at(irq, cfg); -} - -void destroy_irqs(unsigned int irq, unsigned int count) -{ - unsigned int i; - - for (i = 0; i < count; i++) - destroy_irq(irq + i); -} - int arch_setup_hwirq(unsigned int irq, int node) { struct irq_cfg *cfg; diff --git a/include/linux/irq.h b/include/linux/irq.h index c75dd161d37f..7549ed59d3d4 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -526,12 +526,8 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) } /* Handle dynamic irq creation and destruction */ -extern unsigned int create_irq_nr(unsigned int irq_want, 
int node); -extern unsigned int __create_irqs(unsigned int from, unsigned int count, - int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); -extern void destroy_irqs(unsigned int irq, unsigned int count); /* * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and -- cgit v1.2.3 From e8784e4f9a578344023ae4e08a509b7c5eab5eb0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:17 +0000 Subject: genirq: Make create/destroy_irq() ia64 private No more users outside of itanic. Confine it. Signed-off-by: Thomas Gleixner Reviewed-by: Grant Likely Tested-by: Tony Luck Cc: Peter Zijlstra Cc: Fenghua Yu Link: http://lkml.kernel.org/r/20140507154338.700598389@linutronix.de Signed-off-by: Thomas Gleixner --- arch/ia64/include/asm/irq.h | 3 +++ include/linux/irq.h | 4 ---- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h index 91b920fd7d53..820667cbea7e 100644 --- a/arch/ia64/include/asm/irq.h +++ b/arch/ia64/include/asm/irq.h @@ -31,4 +31,7 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask); #define is_affinity_mask_valid is_affinity_mask_valid +int create_irq(void); +void destroy_irq(unsigned int irq); + #endif /* _ASM_IA64_IRQ_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 7549ed59d3d4..ac9634286f42 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -525,10 +525,6 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) IRQ_NOPROBE | IRQ_PER_CPU_DEVID); } -/* Handle dynamic irq creation and destruction */ -extern int create_irq(void); -extern void destroy_irq(unsigned int irq); - /* * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and * irq_free_desc instead. -- cgit v1.2.3 From 1d008353ba088fdec0b2a944e140ff9154a5fb20 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:21 +0000 Subject: genirq: Remove irq_reserve_irq[s] No more users. And it's not going to come back. If you need hotpluggable irq chips, use irq domains.
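For reference, a minimal sketch of the irq domain route recommended above (illustration only, with hypothetical names; a real driver would supply its own irq_chip and domain ops rather than dummy_irq_chip):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	/* dummy_irq_chip stands in for the driver's real chip */
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops demo_irq_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int demo_init_domain(struct device_node *np)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(np, 32, &demo_irq_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* virqs are created on demand; nothing is reserved up front */
	return irq_create_mapping(domain, 0) ? 0 : -ENOSYS;
}

Because mappings are created on demand and torn down again with irq_dispose_mapping(), a hotpluggable irq chip no longer needs anything like irq_reserve_irqs().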
Signed-off-by: Thomas Gleixner Reviewed-and-acked-by: Grant Likely Tested-by: Tony Luck Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20140507154340.302183048@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/irq.h | 7 ------- kernel/irq/irqdesc.c | 25 ------------------------- 2 files changed, 32 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index ac9634286f42..2110f46fcafa 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -617,18 +617,11 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, irq_alloc_descs(-1, from, cnt, node) void irq_free_descs(unsigned int irq, unsigned int cnt); -int irq_reserve_irqs(unsigned int from, unsigned int cnt); - static inline void irq_free_desc(unsigned int irq) { irq_free_descs(irq, 1); } -static inline int irq_reserve_irq(unsigned int irq) -{ - return irq_reserve_irqs(irq, 1); -} - #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ unsigned int irq_alloc_hwirqs(int cnt, int node); static inline unsigned int irq_alloc_hwirq(int node) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 24029348729b..d514ed6080e1 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -454,31 +454,6 @@ void irq_free_hwirqs(unsigned int from, int cnt) EXPORT_SYMBOL_GPL(irq_free_hwirqs); #endif -/** - * irq_reserve_irqs - mark irqs allocated - * @from: mark from irq number - * @cnt: number of irqs to mark - * - * Returns 0 on success or an appropriate error code - */ -int irq_reserve_irqs(unsigned int from, unsigned int cnt) -{ - unsigned int start; - int ret = 0; - - if (!cnt || (from + cnt) > nr_irqs) - return -EINVAL; - - mutex_lock(&sparse_irq_lock); - start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); - if (start == from) - bitmap_set(allocated_irqs, start, cnt); - else - ret = -EEXIST; - mutex_unlock(&sparse_irq_lock); - return ret; -} - /** * irq_get_next_irq - get next allocated irq number * @offset: where to start the search -- cgit v1.2.3 From c940e01c94e73a2a5318f1b82038e0746aaec753 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:22 +0000 Subject: genirq: Replace dynamic_irq_init/cleanup Create a new interface and confine it with a config switch which makes clear that this is just legacy support and not to be used for new code. 
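Illustration only (hypothetical architecture helper, not part of the patch): a platform that selects GENERIC_IRQ_LEGACY would now reset a descriptor through the new interface instead of calling the dynamic_irq_* helpers directly:

#include <linux/irq.h>

/* Hypothetical legacy arch helper on a platform selecting GENERIC_IRQ_LEGACY. */
static void example_recycle_irq(unsigned int irq)
{
	irq_init_desc(irq);	/* previously: dynamic_irq_init(irq) */
}

In this commit irq_init_desc() still forwards to dynamic_irq_cleanup(); the follow-up commit below reroutes it to free_desc() so that the dynamic_irq_* interfaces can be removed entirely.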
Signed-off-by: Thomas Gleixner Reviewed-by: Grant Likely Tested-by: Tony Luck Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20140507154340.574437049@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/irq.h | 4 ++++ kernel/irq/Kconfig | 4 ++++ kernel/irq/irqdesc.c | 7 +++++++ 3 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 2110f46fcafa..8ff71d14365a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -637,6 +637,10 @@ int arch_setup_hwirq(unsigned int irq, int node); void arch_teardown_hwirq(unsigned int irq); #endif +#ifdef CONFIG_GENERIC_IRQ_LEGACY +void irq_init_desc(unsigned int irq); +#endif + #ifndef irq_reg_writel # define irq_reg_writel(val, addr) writel(val, addr) #endif diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index a83f10e406c1..d269cecdfbf0 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -5,6 +5,10 @@ menu "IRQ subsystem" config MAY_HAVE_SPARSE_IRQ bool +# Legacy support, required for itanic +config GENERIC_IRQ_LEGACY + bool + # Enable the generic irq autoprobe mechanism config GENERIC_IRQ_PROBE bool diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index d514ed6080e1..7f267799a717 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -306,6 +306,13 @@ void irq_mark_irq(unsigned int irq) mutex_unlock(&sparse_irq_lock); } +#ifdef CONFIG_GENERIC_IRQ_LEGACY +void irq_init_desc(unsigned int irq) +{ + dynamic_irq_cleanup(irq); +} +#endif + #endif /* !CONFIG_SPARSE_IRQ */ /** -- cgit v1.2.3 From d8179bc0db8d0c9654d5de43de2874bf6d0a58fa Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 May 2014 15:44:23 +0000 Subject: genirq: Remove dynamic_irq mess No more users. Get rid of the cruft. Signed-off-by: Thomas Gleixner Reviewed-by: Grant Likely Tested-by: Tony Luck Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20140507154341.012847637@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/irq.h | 10 ---------- kernel/irq/irqdesc.c | 23 +++++++---------------- 2 files changed, 7 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 8ff71d14365a..0d998d8b01d8 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -525,16 +525,6 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) IRQ_NOPROBE | IRQ_PER_CPU_DEVID); } -/* - * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and - * irq_free_desc instead. 
- */ -extern void dynamic_irq_cleanup(unsigned int irq); -static inline void dynamic_irq_init(unsigned int irq) -{ - dynamic_irq_cleanup(irq); -} - /* Set/get chip/data for an IRQ: */ extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); extern int irq_set_handler_data(unsigned int irq, void *data); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 7f267799a717..7339e42a85ab 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -278,7 +278,12 @@ EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { - dynamic_irq_cleanup(irq); + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + desc_set_defaults(irq, desc, desc_node(desc), NULL); + raw_spin_unlock_irqrestore(&desc->lock, flags); } static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, @@ -309,7 +314,7 @@ void irq_mark_irq(unsigned int irq) #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq) { - dynamic_irq_cleanup(irq); + free_desc(irq); } #endif @@ -522,20 +527,6 @@ int irq_set_percpu_devid(unsigned int irq) return 0; } -/** - * dynamic_irq_cleanup - cleanup a dynamically allocated irq - * @irq: irq number to initialize - */ -void dynamic_irq_cleanup(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - - raw_spin_lock_irqsave(&desc->lock, flags); - desc_set_defaults(irq, desc, desc_node(desc), NULL); - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - void kstat_incr_irq_this_cpu(unsigned int irq) { kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); -- cgit v1.2.3 From cdf86cd233207ed992a647f0b9d42c60735756e7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 8 May 2014 15:42:25 +0200 Subject: gpio: include linux/bug.h in interface header Today's linux-next kernel started showing build errors for the use of WARN_ON in linux/gpio/consumer.h: In file included from drivers/video/backlight/pwm_bl.c:13:0: include/linux/gpio/consumer.h: In function 'gpiod_put': include/linux/gpio/consumer.h:81:2: error: implicit declaration of function 'WARN_ON' [-Werror=implicit-function-declaration] It's not clear why this never happened before, but this patch fixes it by including the header that contains the definition of this macro. Signed-off-by: Arnd Bergmann Acked-by: Alexandre Courbot Signed-off-by: Linus Walleij --- include/linux/gpio/consumer.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 6a37ef0dc59c..05e53ccb708b 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H +#include #include #include -- cgit v1.2.3 From aae4518b3124b29f8dc81c829c704fd2df72e98b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 16 May 2014 02:46:50 +0200 Subject: PM / sleep: Mechanism to avoid resuming runtime-suspended devices unnecessarily Currently, some subsystems (e.g. PCI and the ACPI PM domain) have to resume all runtime-suspended devices during system suspend, mostly because those devices may need to be reprogrammed due to different wakeup settings for system sleep and for runtime PM. For some devices, though, it's OK to remain in runtime suspend throughout a complete system suspend/resume cycle (if the device was in runtime suspend at the start of the cycle).
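As a minimal sketch of how a driver might eventually opt in to the mechanism described in the remainder of this changelog (foo_prepare() is a hypothetical name; the meaning of the positive return value is explained below):

	/* Sketch only: ask to stay runtime-suspended across the sleep cycle. */
	static int foo_prepare(struct device *dev)
	{
		/*
		 * A positive value tells the PM core that the device looks
		 * runtime-suspended and may be left in that state, see below.
		 */
		return pm_runtime_suspended(dev);
	}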
We would like to do this whenever possible, to avoid the overhead of extra power-up and power-down events. However, problems may arise because the device's descendants may require it to be at full power at various points during the cycle. Therefore the most straightforward way to do this safely is if the device and all its descendants can remain runtime suspended until the complete stage of system resume. To this end, introduce a new device PM flag, power.direct_complete and modify the PM core to use that flag as follows. If the ->prepare() callback of a device returns a positive number, the PM core will regard that as an indication that it may leave the device runtime-suspended. It will then check if the system power transition in progress is a suspend (and not hibernation in particular) and if the device is, indeed, runtime-suspended. In that case, the PM core will set the device's power.direct_complete flag. Otherwise it will clear power.direct_complete for the device and it also will later clear it for the device's parent (if there's one). Next, the PM core will not invoke the ->suspend() ->suspend_late(), ->suspend_irq(), ->resume_irq(), ->resume_early(), or ->resume() callbacks for all devices having power.direct_complete set. It will invoke their ->complete() callbacks, however, and those callbacks are then responsible for resuming the devices as appropriate, if necessary. For example, in some cases they may need to queue up runtime resume requests for the devices using pm_request_resume(). Changelog partly based on an Alan Stern's description of the idea (http://marc.info/?l=linux-pm&m=139940466625569&w=2). Signed-off-by: Rafael J. Wysocki Acked-by: Alan Stern --- drivers/base/power/main.c | 66 +++++++++++++++++++++++++++++++++++----------- include/linux/pm.h | 36 +++++++++++++++++++------ include/linux/pm_runtime.h | 6 +++++ 3 files changed, 85 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 86d5e4fb5b98..343ffad59377 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -479,7 +479,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Out; if (!dev->power.is_noirq_suspended) @@ -605,7 +605,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Out; if (!dev->power.is_late_suspended) @@ -735,6 +735,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + /* Match the pm_runtime_disable() in __device_suspend(). 
*/ + pm_runtime_enable(dev); + goto Complete; + } + dpm_wait(dev->parent, async); dpm_watchdog_set(&wd, dev); device_lock(dev); @@ -1007,7 +1013,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a goto Complete; } - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Complete; dpm_wait_for_children(dev, async); @@ -1146,7 +1152,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as goto Complete; } - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Complete; dpm_wait_for_children(dev, async); @@ -1332,6 +1338,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + if (pm_runtime_status_suspended(dev)) { + pm_runtime_disable(dev); + if (pm_runtime_suspended_if_enabled(dev)) + goto Complete; + + pm_runtime_enable(dev); + } + dev->power.direct_complete = false; + } + dpm_watchdog_set(&wd, dev); device_lock(dev); @@ -1382,10 +1399,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: if (!error) { + struct device *parent = dev->parent; + dev->power.is_suspended = true; - if (dev->power.wakeup_path - && dev->parent && !dev->parent->power.ignore_children) - dev->parent->power.wakeup_path = true; + if (parent) { + spin_lock_irq(&parent->power.lock); + + dev->parent->power.direct_complete = false; + if (dev->power.wakeup_path + && !dev->parent->power.ignore_children) + dev->parent->power.wakeup_path = true; + + spin_unlock_irq(&parent->power.lock); + } } device_unlock(dev); @@ -1487,7 +1513,7 @@ static int device_prepare(struct device *dev, pm_message_t state) { int (*callback)(struct device *) = NULL; char *info = NULL; - int error = 0; + int ret = 0; if (dev->power.syscore) return 0; @@ -1523,17 +1549,27 @@ static int device_prepare(struct device *dev, pm_message_t state) callback = dev->driver->pm->prepare; } - if (callback) { - error = callback(dev); - suspend_report_result(callback, error); - } + if (callback) + ret = callback(dev); device_unlock(dev); - if (error) + if (ret < 0) { + suspend_report_result(callback, ret); pm_runtime_put(dev); - - return error; + return ret; + } + /* + * A positive return value from ->prepare() means "this device appears + * to be runtime-suspended and its state is fine, so if it really is + * runtime-suspended, you can leave it in that state provided that you + * will do the same thing with all of its descendants". This only + * applies to suspend transitions, however. + */ + spin_lock_irq(&dev->power.lock); + dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; + spin_unlock_irq(&dev->power.lock); + return 0; } /** diff --git a/include/linux/pm.h b/include/linux/pm.h index d915d0345fa1..72c0fe098a27 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -93,13 +93,23 @@ typedef struct pm_message { * been registered) to recover from the race condition. * This method is executed for all kinds of suspend transitions and is * followed by one of the suspend callbacks: @suspend(), @freeze(), or - * @poweroff(). The PM core executes subsystem-level @prepare() for all - * devices before starting to invoke suspend callbacks for any of them, so - * generally devices may be assumed to be functional or to respond to - * runtime resume requests while @prepare() is being executed. 
However, - * device drivers may NOT assume anything about the availability of user - * space at that time and it is NOT valid to request firmware from within - * @prepare() (it's too late to do that). It also is NOT valid to allocate + * @poweroff(). If the transition is a suspend to memory or standby (that + * is, not related to hibernation), the return value of @prepare() may be + * used to indicate to the PM core to leave the device in runtime suspend + * if applicable. Namely, if @prepare() returns a positive number, the PM + * core will understand that as a declaration that the device appears to be + * runtime-suspended and it may be left in that state during the entire + * transition and during the subsequent resume if all of its descendants + * are left in runtime suspend too. If that happens, @complete() will be + * executed directly after @prepare() and it must ensure the proper + * functioning of the device after the system resume. + * The PM core executes subsystem-level @prepare() for all devices before + * starting to invoke suspend callbacks for any of them, so generally + * devices may be assumed to be functional or to respond to runtime resume + * requests while @prepare() is being executed. However, device drivers + * may NOT assume anything about the availability of user space at that + * time and it is NOT valid to request firmware from within @prepare() + * (it's too late to do that). It also is NOT valid to allocate * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. * [To work around these limitations, drivers may register suspend and * hibernation notifiers to be executed before the freezing of tasks.] @@ -112,7 +122,16 @@ typedef struct pm_message { * of the other devices that the PM core has unsuccessfully attempted to * suspend earlier). * The PM core executes subsystem-level @complete() after it has executed - * the appropriate resume callbacks for all devices. + * the appropriate resume callbacks for all devices. If the corresponding + * @prepare() at the beginning of the suspend transition returned a + * positive number and the device was left in runtime suspend (without + * executing any suspend and resume callbacks for it), @complete() will be + * the only callback executed for the device during resume. In that case, + * @complete() must be prepared to do whatever is necessary to ensure the + * proper functioning of the device after the system resume. To this end, + * @complete() can check the power.direct_complete flag of the device to + * learn whether (unset) or not (set) the previous suspend and resume + * callbacks have been executed for it. * * @suspend: Executed before putting the system into a sleep state in which the * contents of main memory are preserved. 
The exact action to perform @@ -546,6 +565,7 @@ struct dev_pm_info { bool is_late_suspended:1; bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ + bool direct_complete:1; /* Owned by the PM core */ spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2a5897a4afbc..43fd6716f662 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -101,6 +101,11 @@ static inline bool pm_runtime_status_suspended(struct device *dev) return dev->power.runtime_status == RPM_SUSPENDED; } +static inline bool pm_runtime_suspended_if_enabled(struct device *dev) +{ + return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1; +} + static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -150,6 +155,7 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} -- cgit v1.2.3 From b8802f76fe473d91886220498aeda157c492f2d1 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Sun, 11 May 2014 16:05:58 +0800 Subject: irqchip: gic: Use mask field in GICC_IAR Bit[9:0] is interrupt ID field in GICC_IAR. Bit[12:10] is CPU ID field, and others are reserved. So we should use GICC_IAR_INT_ID_MASK to get interrupt ID. It's not a good way to use ~0x1c00 (CPU ID field) to get interrupt ID. Signed-off-by: Haojian Zhuang Link: https://lkml.kernel.org/r/1399795571-17231-3-git-send-email-haojian.zhuang@linaro.org Signed-off-by: Jason Cooper --- drivers/irqchip/irq-gic.c | 2 +- include/linux/irqchip/arm-gic.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4300b6606f5e..f711fb6af7a9 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -287,7 +287,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) do { irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); - irqnr = irqstat & ~0x1c00; + irqnr = irqstat & GICC_IAR_INT_ID_MASK; if (likely(irqnr > 15 && irqnr < 1021)) { irqnr = irq_find_mapping(gic->domain, irqnr); diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 7ed92d0560d5..45e2d8c15bd2 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -21,6 +21,8 @@ #define GIC_CPU_ACTIVEPRIO 0xd0 #define GIC_CPU_IDENT 0xfc +#define GICC_IAR_INT_ID_MASK 0x3ff + #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 #define GIC_DIST_IGROUP 0x080 -- cgit v1.2.3 From 1429d7c9467e1e3de0b0ff91d7e4d67c1a92f8a3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 09:23:55 -0600 Subject: blk-mq: switch ctx pending map to the sparser blk_align_bitmap Each hardware queue has a bitmap of software queues with pending requests. When new IO is queued on a software queue, the bit is set, and when IO is pruned on a hardware queue run, the bit is cleared. This causes a lot of traffic. 
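For orientation, the sparser per-word layout adopted below maps a software queue to its pending bit roughly as follows; this is a sketch mirroring the get_bm()/CTX_TO_BIT() helpers added by the patch, not code taken from it:

	/* Sketch: locate the word and bit for a software queue (ctx). */
	struct blk_align_bitmap *bm =
		&hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
	unsigned int bit = ctx->index_hw & (hctx->ctx_map.bits_per_word - 1);

	set_bit(bit, &bm->word);	/* mark pending work for this ctx */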
Switch this from the regular BITS_PER_LONG bitmap to a sparser layout, similarly to what was done for blk-mq tagging. 20% performance increase was observed for single threaded IO, and about 15% performanc increase on multiple threads driving the same device. Signed-off-by: Jens Axboe --- block/blk-mq.c | 119 +++++++++++++++++++++++++++++++++++++------------ include/linux/blk-mq.h | 10 ++++- 2 files changed, 99 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 526feee31bff..e862c4408427 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -56,21 +56,40 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) { unsigned int i; - for (i = 0; i < hctx->nr_ctx_map; i++) - if (hctx->ctx_map[i]) + for (i = 0; i < hctx->ctx_map.map_size; i++) + if (hctx->ctx_map.map[i].word) return true; return false; } +static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; +} + +#define CTX_TO_BIT(hctx, ctx) \ + ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1)) + /* * Mark this ctx as having pending work in this hardware queue */ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { - if (!test_bit(ctx->index_hw, hctx->ctx_map)) - set_bit(ctx->index_hw, hctx->ctx_map); + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) + set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); +} + +static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, @@ -614,6 +633,40 @@ static bool blk_mq_attempt_merge(struct request_queue *q, return false; } +/* + * Process software queues that have been marked busy, splicing them + * to the for-dispatch + */ +static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) +{ + struct blk_mq_ctx *ctx; + int i; + + for (i = 0; i < hctx->ctx_map.map_size; i++) { + struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; + unsigned int off, bit; + + if (!bm->word) + continue; + + bit = 0; + off = i * hctx->ctx_map.bits_per_word; + do { + bit = find_next_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + ctx = hctx->ctxs[bit + off]; + clear_bit(bit, &bm->word); + spin_lock(&ctx->lock); + list_splice_tail_init(&ctx->rq_list, list); + spin_unlock(&ctx->lock); + + bit++; + } while (1); + } +} + /* * Run this hardware queue, pulling any software queues mapped to it in. * Note that this function currently has various problems around ordering @@ -623,10 +676,9 @@ static bool blk_mq_attempt_merge(struct request_queue *q, static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; - struct blk_mq_ctx *ctx; struct request *rq; LIST_HEAD(rq_list); - int bit, queued; + int queued; WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); @@ -638,14 +690,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) /* * Touch any software queue that has pending entries. 
*/ - for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { - clear_bit(bit, hctx->ctx_map); - ctx = hctx->ctxs[bit]; - - spin_lock(&ctx->lock); - list_splice_tail_init(&ctx->rq_list, &rq_list); - spin_unlock(&ctx->lock); - } + flush_busy_ctxs(hctx, &rq_list); /* * If we have previous entries on our dispatch list, grab them @@ -658,14 +703,10 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) spin_unlock(&hctx->lock); } - /* - * Delete and return all entries from our dispatch list - */ - queued = 0; - /* * Now process all the entries, sending them to the driver. */ + queued = 0; while (!list_empty(&rq_list)) { int ret; @@ -1158,7 +1199,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_list)) { list_splice_init(&ctx->rq_list, &tmp); - clear_bit(ctx->index_hw, hctx->ctx_map); + blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); @@ -1298,6 +1339,34 @@ fail: return NULL; } +static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap) +{ + kfree(bitmap->map); +} + +static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) +{ + unsigned int bpw = 8, total, num_maps, i; + + bitmap->bits_per_word = bpw; + + num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; + bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap), + GFP_KERNEL, node); + if (!bitmap->map) + return -ENOMEM; + + bitmap->map_size = num_maps; + + total = nr_cpu_ids; + for (i = 0; i < num_maps; i++) { + bitmap->map[i].depth = min(total, bitmap->bits_per_word); + total -= bitmap->map[i].depth; + } + + return 0; +} + static int blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { @@ -1308,7 +1377,6 @@ static int blk_mq_init_hw_queues(struct request_queue *q, * Initialize hardware queues */ queue_for_each_hw_ctx(q, hctx, i) { - unsigned int num_maps; int node; node = hctx->numa_node; @@ -1339,13 +1407,9 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (!hctx->ctxs) break; - num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; - hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long), - GFP_KERNEL, node); - if (!hctx->ctx_map) + if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) break; - hctx->nr_ctx_map = num_maps; hctx->nr_ctx = 0; if (set->ops->init_hctx && @@ -1368,7 +1432,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); kfree(hctx->ctxs); - kfree(hctx->ctx_map); + blk_mq_free_bitmap(&hctx->ctx_map); } return 1; @@ -1542,7 +1606,6 @@ void blk_mq_free_queue(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - kfree(hctx->ctx_map); kfree(hctx->ctxs); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f83d15f6e1c1..952e558ee598 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -11,6 +11,12 @@ struct blk_mq_cpu_notifier { void (*notify)(void *data, unsigned long action, unsigned int cpu); }; +struct blk_mq_ctxmap { + unsigned int map_size; + unsigned int bits_per_word; + struct blk_align_bitmap *map; +}; + struct blk_mq_hw_ctx { struct { spinlock_t lock; @@ -31,8 +37,8 @@ struct blk_mq_hw_ctx { void *driver_data; - unsigned int nr_ctx_map; - unsigned long *ctx_map; + struct blk_mq_ctxmap ctx_map; + unsigned int nr_ctx; struct blk_mq_ctx **ctxs; -- cgit v1.2.3 From 61f38db3e3c0e4c3be0858750e2cabeadaecac0c Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Sat, 26 Apr 2014 23:15:35 -0700 
Subject: rcu: Provide API to suppress stall warnings while sysrq runs Some sysrq handlers can run for a long time, because they dump a lot of data onto a serial console. Having RCU stall warnings pop up in the middle of them only makes the problem worse. This commit provides rcu_sysrq_start() and rcu_sysrq_end() APIs to temporarily suppress RCU CPU stall warnings while a sysrq request is handled. Signed-off-by: Rik van Riel [ paulmck: Fix TINY_RCU build error. ] Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 12 ++++++++++++ kernel/rcu/update.c | 12 ++++++++++++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9ccd644c1234..5a75d19aa661 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -248,6 +248,18 @@ void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); +#ifdef CONFIG_RCU_STALL_COMMON +void rcu_sysrq_start(void); +void rcu_sysrq_end(void); +#else /* #ifdef CONFIG_RCU_STALL_COMMON */ +static inline void rcu_sysrq_start(void) +{ +} +static inline void rcu_sysrq_end(void) +{ +} +#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ + #ifdef CONFIG_RCU_USER_QS void rcu_user_enter(void); void rcu_user_exit(void); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index ed7a0d72562c..a2aeb4df0f60 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -320,6 +320,18 @@ int rcu_jiffies_till_stall_check(void) return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; } +void rcu_sysrq_start(void) +{ + if (!rcu_cpu_stall_suppress) + rcu_cpu_stall_suppress = 2; +} + +void rcu_sysrq_end(void) +{ + if (rcu_cpu_stall_suppress == 2) + rcu_cpu_stall_suppress = 0; +} + static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) { rcu_cpu_stall_suppress = 1; -- cgit v1.2.3 From 8d9e9857c576d8d710ae6a6152a6ddcd29772bb1 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 19 May 2014 14:34:09 +0100 Subject: goldfish: fix >> 32 warning We should be checking for a 64bit platform, not 64bit DMA address types in the case of Goldfish. The Goldfish virtual platform is either 32/32 or 64/64. Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- include/linux/goldfish.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h index 9cc28902b54c..569236e6b2bc 100644 --- a/include/linux/goldfish.h +++ b/include/linux/goldfish.h @@ -7,7 +7,7 @@ static inline void gf_write64(unsigned long data, void __iomem *portl, void __iomem *porth) { writel((u32)data, portl); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_64BIT writel(data>>32, porth); #endif } -- cgit v1.2.3 From 5080a08d0f8a4b2ba3a15e5ddc5ece84a444cad8 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Fri, 21 Mar 2014 10:46:39 +0100 Subject: mmc: mmci: Enforce max frequency configuration through DT Remove the option to provide a maximum frequency as platform data and enforce it through DT.
Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 5 +---- include/linux/amba/mmci.h | 4 ---- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 9c60325f1a30..758efea184c9 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1500,13 +1500,10 @@ static int mmci_probe(struct amba_device *dev, * If no maximum operating frequency is supplied, fall back to use * the module parameter, which has a (low) default value in case it * is not specified. Either value must not exceed the clock rate into - * the block, of course. Also note that DT takes precedence over - * platform data. + * the block, of course. */ if (mmc->f_max) mmc->f_max = min(host->mclk, mmc->f_max); - else if (plat->f_max) - mmc->f_max = min(host->mclk, plat->f_max); else mmc->f_max = min(host->mclk, fmax); dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index b992fc931295..3f95d32d5277 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -12,9 +12,6 @@ struct dma_chan; /** * struct mmci_platform_data - platform configuration for the MMCI * (also known as PL180) block. - * @f_max: the maximum operational frequency for this host in this - * platform configuration. When this is specified it takes precedence - * over the module parameter for the same frequency. * @ocr_mask: available voltages on the 4 pins from the block, this * is ignored if a regulator is used, see the MMC_VDD_* masks in * mmc/host.h @@ -42,7 +39,6 @@ struct dma_chan; * bidirectional channel */ struct mmci_platform_data { - unsigned int f_max; unsigned int ocr_mask; int (*ios_handler)(struct device *, struct mmc_ios *); unsigned int (*status)(struct device *); -- cgit v1.2.3 From f1af9d3af308145478749194346f11efad1134b2 Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Wed, 29 Jan 2014 18:16:01 +0100 Subject: mm/memblock: Do some refactoring, enhance API Refactor the memblock code and extend the memblock API to make it more flexible. With the extended API it is simple to define and work with additional memory lists. The static functions memblock_add_region and __memblock_remove are renamed to memblock_add_range and meblock_remove_range and added to the memblock API. The __next_free_mem_range and __next_free_mem_range_rev functions are replaced with calls to the more generic list walkers __next_mem_range and __next_mem_range_rev. To walk an arbitrary memory list two new macros for_each_mem_range and for_each_mem_range_rev are added. These new macros are used to define for_each_free_mem_range and for_each_free_mem_range_reverse. 
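As a usage sketch (illustrative only, following the macro signatures in the hunks below), the new walkers compose like this:

	u64 i;
	phys_addr_t start, end;

	/* Free memory: ranges in memblock.memory that are not reserved. */
	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
		pr_info("free range: [%pa-%pa]\n", &start, &end);

	/* The generic form takes the two lists explicitly. */
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
			   NUMA_NO_NODE, &start, &end, NULL)
		pr_info("free range: [%pa-%pa]\n", &start, &end);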
Signed-off-by: Philipp Hachtmann Signed-off-by: Martin Schwidefsky --- include/linux/memblock.h | 75 ++++++++++++++---- mm/memblock.c | 193 ++++++++++++++++++++++++++++++----------------- 2 files changed, 183 insertions(+), 85 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 8a20a51ed42d..f669016874b3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -71,6 +71,63 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); + +/* Low level functions */ +int memblock_add_range(struct memblock_type *type, + phys_addr_t base, phys_addr_t size, + int nid, unsigned long flags); + +int memblock_remove_range(struct memblock_type *type, + phys_addr_t base, + phys_addr_t size); + +void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a, + struct memblock_type *type_b, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, + struct memblock_type *type_b, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +/** + * for_each_mem_range - iterate through memblock areas from type_a and not + * included in type_b. Or just type_a if type_b is NULL. + * @i: u64 used as loop variable + * @type_a: ptr to memblock_type to iterate + * @type_b: ptr to memblock_type which excludes from the iteration + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + */ +#define for_each_mem_range(i, type_a, type_b, nid, \ + p_start, p_end, p_nid) \ + for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \ + p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_mem_range(&i, nid, type_a, type_b, \ + p_start, p_end, p_nid)) + +/** + * for_each_mem_range_rev - reverse iterate through memblock areas from + * type_a and not included in type_b. Or just type_a if type_b is NULL. 
+ * @i: u64 used as loop variable + * @type_a: ptr to memblock_type to iterate + * @type_b: ptr to memblock_type which excludes from the iteration + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + */ +#define for_each_mem_range_rev(i, type_a, type_b, nid, \ + p_start, p_end, p_nid) \ + for (i = (u64)ULLONG_MAX, \ + __next_mem_range_rev(&i, nid, type_a, type_b, \ + p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_mem_range_rev(&i, nid, type_a, type_b, \ + p_start, p_end, p_nid)) + #ifdef CONFIG_MOVABLE_NODE static inline bool memblock_is_hotpluggable(struct memblock_region *m) { @@ -113,9 +170,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ -void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid); - /** * for_each_free_mem_range - iterate through free memblock areas * @i: u64 used as loop variable @@ -128,13 +182,8 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, * soon as memblock is initialized. */ #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ - for (i = 0, \ - __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ - i != (u64)ULLONG_MAX; \ - __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) - -void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid); + for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ + nid, p_start, p_end, p_nid) /** * for_each_free_mem_range_reverse - rev-iterate through free memblock areas @@ -148,10 +197,8 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, * order. Available as soon as memblock is initialized. */ #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ - for (i = (u64)ULLONG_MAX, \ - __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ - i != (u64)ULLONG_MAX; \ - __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) + for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ + nid, p_start, p_end, p_nid) static inline void memblock_set_region_flags(struct memblock_region *r, unsigned long flags) diff --git a/mm/memblock.c b/mm/memblock.c index e9d6ca9a01a9..9edd0928daab 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -472,7 +472,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, } /** - * memblock_add_region - add new memblock region + * memblock_add_range - add new memblock region * @type: memblock type to add new region into * @base: base address of the new region * @size: size of the new region @@ -487,7 +487,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, * RETURNS: * 0 on success, -errno on failure. 
*/ -static int __init_memblock memblock_add_region(struct memblock_type *type, +int __init_memblock memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) { @@ -569,12 +569,12 @@ repeat: int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, int nid) { - return memblock_add_region(&memblock.memory, base, size, nid, 0); + return memblock_add_range(&memblock.memory, base, size, nid, 0); } int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { - return memblock_add_region(&memblock.memory, base, size, + return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); } @@ -654,8 +654,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, return 0; } -static int __init_memblock __memblock_remove(struct memblock_type *type, - phys_addr_t base, phys_addr_t size) +int __init_memblock memblock_remove_range(struct memblock_type *type, + phys_addr_t base, phys_addr_t size) { int start_rgn, end_rgn; int i, ret; @@ -671,9 +671,10 @@ static int __init_memblock __memblock_remove(struct memblock_type *type, int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) { - return __memblock_remove(&memblock.memory, base, size); + return memblock_remove_range(&memblock.memory, base, size); } + int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) { memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", @@ -681,7 +682,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) (unsigned long long)base + size - 1, (void *)_RET_IP_); - return __memblock_remove(&memblock.reserved, base, size); + return memblock_remove_range(&memblock.reserved, base, size); } static int __init_memblock memblock_reserve_region(phys_addr_t base, @@ -696,7 +697,7 @@ static int __init_memblock memblock_reserve_region(phys_addr_t base, (unsigned long long)base + size - 1, flags, (void *)_RET_IP_); - return memblock_add_region(_rgn, base, size, nid, flags); + return memblock_add_range(_rgn, base, size, nid, flags); } int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) @@ -758,17 +759,19 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) } /** - * __next_free_mem_range - next function for for_each_free_mem_range() + * __next__mem_range - next function for for_each_free_mem_range() etc. * @idx: pointer to u64 loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes + * @type_a: pointer to memblock_type from where the range is taken + * @type_b: pointer to memblock_type which excludes memory from being taken * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL * - * Find the first free area from *@idx which matches @nid, fill the out + * Find the first area from *@idx which matches @nid, fill the out * parameters, and update *@idx for the next iteration. The lower 32bit of - * *@idx contains index into memory region and the upper 32bit indexes the - * areas before each reserved region. For example, if reserved regions + * *@idx contains index into type_a and the upper 32bit indexes the + * areas before each region in type_b. 
For example, if type_b regions * look like the following, * * 0:[0-16), 1:[32-48), 2:[128-130) @@ -780,53 +783,77 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) * As both region arrays are sorted, the function advances the two indices * in lockstep and returns each intersection. */ -void __init_memblock __next_free_mem_range(u64 *idx, int nid, - phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid) +void __init_memblock __next_mem_range(u64 *idx, int nid, + struct memblock_type *type_a, + struct memblock_type *type_b, + phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid) { - struct memblock_type *mem = &memblock.memory; - struct memblock_type *rsv = &memblock.reserved; - int mi = *idx & 0xffffffff; - int ri = *idx >> 32; + int idx_a = *idx & 0xffffffff; + int idx_b = *idx >> 32; - if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) + if (WARN_ONCE(nid == MAX_NUMNODES, + "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) nid = NUMA_NO_NODE; - for ( ; mi < mem->cnt; mi++) { - struct memblock_region *m = &mem->regions[mi]; + for (; idx_a < type_a->cnt; idx_a++) { + struct memblock_region *m = &type_a->regions[idx_a]; + phys_addr_t m_start = m->base; phys_addr_t m_end = m->base + m->size; + int m_nid = memblock_get_region_node(m); /* only memory regions are associated with nodes, check it */ - if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) + if (nid != NUMA_NO_NODE && nid != m_nid) continue; - /* scan areas before each reservation for intersection */ - for ( ; ri < rsv->cnt + 1; ri++) { - struct memblock_region *r = &rsv->regions[ri]; - phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; - phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; + if (!type_b) { + if (out_start) + *out_start = m_start; + if (out_end) + *out_end = m_end; + if (out_nid) + *out_nid = m_nid; + idx_a++; + *idx = (u32)idx_a | (u64)idx_b << 32; + return; + } + + /* scan areas before each reservation */ + for (; idx_b < type_b->cnt + 1; idx_b++) { + struct memblock_region *r; + phys_addr_t r_start; + phys_addr_t r_end; + + r = &type_b->regions[idx_b]; + r_start = idx_b ? r[-1].base + r[-1].size : 0; + r_end = idx_b < type_b->cnt ? + r->base : ULLONG_MAX; - /* if ri advanced past mi, break out to advance mi */ + /* + * if idx_b advanced past idx_a, + * break out to advance idx_a + */ if (r_start >= m_end) break; /* if the two regions intersect, we're done */ if (m_start < r_end) { if (out_start) - *out_start = max(m_start, r_start); + *out_start = + max(m_start, r_start); if (out_end) *out_end = min(m_end, r_end); if (out_nid) - *out_nid = memblock_get_region_node(m); + *out_nid = m_nid; /* - * The region which ends first is advanced - * for the next iteration. + * The region which ends first is + * advanced for the next iteration. */ if (m_end <= r_end) - mi++; + idx_a++; else - ri++; - *idx = (u32)mi | (u64)ri << 32; + idx_b++; + *idx = (u32)idx_a | (u64)idx_b << 32; return; } } @@ -837,57 +864,80 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, } /** - * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() + * __next_mem_range_rev - generic next function for for_each_*_range_rev() + * + * Finds the next range from type_a which is not marked as unsuitable + * in type_b. 
+ * * @idx: pointer to u64 loop variable * @nid: nid: node selector, %NUMA_NO_NODE for all nodes + * @type_a: pointer to memblock_type from where the range is taken + * @type_b: pointer to memblock_type which excludes memory from being taken * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL * - * Reverse of __next_free_mem_range(). - * - * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't - * be able to hot-remove hotpluggable memory used by the kernel. So this - * function skip hotpluggable regions if needed when allocating memory for the - * kernel. + * Reverse of __next_mem_range(). */ -void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, - phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid) +void __init_memblock __next_mem_range_rev(u64 *idx, int nid, + struct memblock_type *type_a, + struct memblock_type *type_b, + phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid) { - struct memblock_type *mem = &memblock.memory; - struct memblock_type *rsv = &memblock.reserved; - int mi = *idx & 0xffffffff; - int ri = *idx >> 32; + int idx_a = *idx & 0xffffffff; + int idx_b = *idx >> 32; if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) nid = NUMA_NO_NODE; if (*idx == (u64)ULLONG_MAX) { - mi = mem->cnt - 1; - ri = rsv->cnt; + idx_a = type_a->cnt - 1; + idx_b = type_b->cnt; } - for ( ; mi >= 0; mi--) { - struct memblock_region *m = &mem->regions[mi]; + for (; idx_a >= 0; idx_a--) { + struct memblock_region *m = &type_a->regions[idx_a]; + phys_addr_t m_start = m->base; phys_addr_t m_end = m->base + m->size; + int m_nid = memblock_get_region_node(m); /* only memory regions are associated with nodes, check it */ - if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) + if (nid != NUMA_NO_NODE && nid != m_nid) continue; /* skip hotpluggable memory regions if needed */ if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) continue; - /* scan areas before each reservation for intersection */ - for ( ; ri >= 0; ri--) { - struct memblock_region *r = &rsv->regions[ri]; - phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; - phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; + if (!type_b) { + if (out_start) + *out_start = m_start; + if (out_end) + *out_end = m_end; + if (out_nid) + *out_nid = m_nid; + idx_a++; + *idx = (u32)idx_a | (u64)idx_b << 32; + return; + } + + /* scan areas before each reservation */ + for (; idx_b >= 0; idx_b--) { + struct memblock_region *r; + phys_addr_t r_start; + phys_addr_t r_end; + + r = &type_b->regions[idx_b]; + r_start = idx_b ? r[-1].base + r[-1].size : 0; + r_end = idx_b < type_b->cnt ? 
+ r->base : ULLONG_MAX; + /* + * if idx_b advanced past idx_a, + * break out to advance idx_a + */ - /* if ri advanced past mi, break out to advance mi */ if (r_end <= m_start) break; /* if the two regions intersect, we're done */ @@ -897,18 +947,17 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, if (out_end) *out_end = min(m_end, r_end); if (out_nid) - *out_nid = memblock_get_region_node(m); - + *out_nid = m_nid; if (m_start >= r_start) - mi--; + idx_a--; else - ri--; - *idx = (u32)mi | (u64)ri << 32; + idx_b--; + *idx = (u32)idx_a | (u64)idx_b << 32; return; } } } - + /* signal end of iteration */ *idx = ULLONG_MAX; } @@ -1201,7 +1250,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) __func__, (u64)base, (u64)base + size - 1, (void *)_RET_IP_); kmemleak_free_part(__va(base), size); - __memblock_remove(&memblock.reserved, base, size); + memblock_remove_range(&memblock.reserved, base, size); } /* @@ -1287,8 +1336,10 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit) } /* truncate both memory and reserved regions */ - __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX); - __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX); + memblock_remove_range(&memblock.memory, max_addr, + (phys_addr_t)ULLONG_MAX); + memblock_remove_range(&memblock.reserved, max_addr, + (phys_addr_t)ULLONG_MAX); } static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) -- cgit v1.2.3 From 70210ed950b538ee7eb811dccc402db9df1c9be4 Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Wed, 29 Jan 2014 18:16:01 +0100 Subject: mm/memblock: add physical memory list Add the physmem list to the memblock structure. This list only exists if HAVE_MEMBLOCK_PHYS_MAP is selected and contains the unmodified list of physically available memory. It differs from the memblock memory list as it always contains all memory ranges even if the memory has been restricted, e.g. by use of the mem= kernel parameter. Signed-off-by: Philipp Hachtmann Signed-off-by: Martin Schwidefsky --- include/linux/memblock.h | 4 ++++ mm/Kconfig | 3 +++ mm/memblock.c | 12 ++++++++++++ 3 files changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index f669016874b3..73dc382e72d8 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -18,6 +18,7 @@ #include #define INIT_MEMBLOCK_REGIONS 128 +#define INIT_PHYSMEM_REGIONS 4 /* Definition of memblock flags. 
*/ #define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ @@ -43,6 +44,9 @@ struct memblock { phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP + struct memblock_type physmem; +#endif }; extern struct memblock memblock; diff --git a/mm/Kconfig b/mm/Kconfig index 1b5a95f0fa01..28cec518f4d4 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -134,6 +134,9 @@ config HAVE_MEMBLOCK config HAVE_MEMBLOCK_NODE_MAP boolean +config HAVE_MEMBLOCK_PHYS_MAP + boolean + config ARCH_DISCARD_MEMBLOCK boolean diff --git a/mm/memblock.c b/mm/memblock.c index 9edd0928daab..a810ba923cdd 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -27,6 +27,9 @@ static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP +static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; +#endif struct memblock memblock __initdata_memblock = { .memory.regions = memblock_memory_init_regions, @@ -37,6 +40,12 @@ struct memblock memblock __initdata_memblock = { .reserved.cnt = 1, /* empty dummy entry */ .reserved.max = INIT_MEMBLOCK_REGIONS, +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP + .physmem.regions = memblock_physmem_init_regions, + .physmem.cnt = 1, /* empty dummy entry */ + .physmem.max = INIT_PHYSMEM_REGIONS, +#endif + .bottom_up = false, .current_limit = MEMBLOCK_ALLOC_ANYWHERE, }; @@ -1553,6 +1562,9 @@ static int __init memblock_init_debugfs(void) return -ENXIO; debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP + debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); +#endif return 0; } -- cgit v1.2.3 From 4cf563c5d97c83d4b2fb3a778dd7d5e362cc3e34 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Thu, 15 May 2014 16:40:23 +0300 Subject: ACPI / PM: Export rest of the subsys PM callbacks No reason for excluding the remaining ones. Signed-off-by: Heikki Krogerus [rjw: Rebased and exported the new acpi_subsys_complete() too.] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/device_pm.c | 5 ++++- include/linux/acpi.h | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 9e5fd9c440b7..49a51277f81d 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -928,7 +928,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare); * acpi_subsys_complete - Finalize device's resume during system resume. * @dev: Device to handle. */ -static void acpi_subsys_complete(struct device *dev) +void acpi_subsys_complete(struct device *dev) { /* * If the device had been runtime-suspended before the system went into @@ -938,6 +938,7 @@ static void acpi_subsys_complete(struct device *dev) if (dev->power.direct_complete) pm_request_resume(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_complete); /** * acpi_subsys_suspend - Run the device driver's suspend callback. @@ -951,6 +952,7 @@ int acpi_subsys_suspend(struct device *dev) pm_runtime_resume(dev); return pm_generic_suspend(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_suspend); /** * acpi_subsys_suspend_late - Suspend device using ACPI. 
@@ -996,6 +998,7 @@ int acpi_subsys_freeze(struct device *dev) pm_runtime_resume(dev); return pm_generic_freeze(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_freeze); #endif /* CONFIG_PM_SLEEP */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7a8f2cd66c8b..4c007262e891 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -554,14 +554,20 @@ static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } int acpi_dev_suspend_late(struct device *dev); int acpi_dev_resume_early(struct device *dev); int acpi_subsys_prepare(struct device *dev); +void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_resume_early(struct device *dev); +int acpi_subsys_suspend(struct device *dev); +int acpi_subsys_freeze(struct device *dev); #else static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } static inline int acpi_dev_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_prepare(struct device *dev) { return 0; } +static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } +static inline int acpi_subsys_suspend(struct device *dev) { return 0; } +static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM) -- cgit v1.2.3 From e2d0e90fae82809667f1dcf4d0d9baa421691c7a Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Thu, 15 May 2014 16:40:25 +0300 Subject: clk: new basic clk type for fractional divider Fractional divider clocks are fairly common. This adds basic type for them. Signed-off-by: Heikki Krogerus Acked-by: Mike Turquette Signed-off-by: Rafael J. Wysocki --- drivers/clk/Makefile | 1 + drivers/clk/clk-fractional-divider.c | 135 +++++++++++++++++++++++++++++++++++ include/linux/clk-provider.h | 31 ++++++++ 3 files changed, 167 insertions(+) create mode 100644 drivers/clk/clk-fractional-divider.c (limited to 'include/linux') diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 5f8a28735c96..0745059b1834 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o obj-$(CONFIG_COMMON_CLK) += clk-gate.o obj-$(CONFIG_COMMON_CLK) += clk-mux.o obj-$(CONFIG_COMMON_CLK) += clk-composite.o +obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o # hardware specific clock types # please keep this section sorted lexicographically by file/directory path name diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c new file mode 100644 index 000000000000..ede685ca0d20 --- /dev/null +++ b/drivers/clk/clk-fractional-divider.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Adjustable fractional divider clock implementation. + * Output rate = (m / n) * parent_rate. 
+ */ + +#include +#include +#include +#include +#include + +#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) + +static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long flags = 0; + u32 val, m, n; + u64 ret; + + if (fd->lock) + spin_lock_irqsave(fd->lock, flags); + + val = clk_readl(fd->reg); + + if (fd->lock) + spin_unlock_irqrestore(fd->lock, flags); + + m = (val & fd->mmask) >> fd->mshift; + n = (val & fd->nmask) >> fd->nshift; + + ret = parent_rate * m; + do_div(ret, n); + + return ret; +} + +static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned maxn = (fd->nmask >> fd->nshift) + 1; + unsigned div; + + if (!rate || rate >= *prate) + return *prate; + + div = gcd(*prate, rate); + + while ((*prate / div) > maxn) { + div <<= 1; + rate <<= 1; + } + + return rate; +} + +static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long flags = 0; + unsigned long div; + unsigned n, m; + u32 val; + + div = gcd(parent_rate, rate); + m = rate / div; + n = parent_rate / div; + + if (fd->lock) + spin_lock_irqsave(fd->lock, flags); + + val = clk_readl(fd->reg); + val &= ~(fd->mmask | fd->nmask); + val |= (m << fd->mshift) | (n << fd->nshift); + clk_writel(val, fd->reg); + + if (fd->lock) + spin_unlock_irqrestore(fd->lock, flags); + + return 0; +} + +const struct clk_ops clk_fractional_divider_ops = { + .recalc_rate = clk_fd_recalc_rate, + .round_rate = clk_fd_round_rate, + .set_rate = clk_fd_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_fractional_divider_ops); + +struct clk *clk_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock) +{ + struct clk_fractional_divider *fd; + struct clk_init_data init; + struct clk *clk; + + fd = kzalloc(sizeof(*fd), GFP_KERNEL); + if (!fd) { + dev_err(dev, "could not allocate fractional divider clk\n"); + return ERR_PTR(-ENOMEM); + } + + init.name = name; + init.ops = &clk_fractional_divider_ops; + init.flags = flags | CLK_IS_BASIC; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 
1 : 0; + + fd->reg = reg; + fd->mshift = mshift; + fd->mmask = (BIT(mwidth) - 1) << mshift; + fd->nshift = nshift; + fd->nmask = (BIT(nwidth) - 1) << nshift; + fd->flags = clk_divider_flags; + fd->lock = lock; + fd->hw.init = &init; + + clk = clk_register(dev, &fd->hw); + if (IS_ERR(clk)) + kfree(fd); + + return clk; +} +EXPORT_SYMBOL_GPL(clk_register_fractional_divider); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 511917416fb0..fb4eca6907cd 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -413,6 +413,37 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); +/** + * struct clk_fractional_divider - adjustable fractional divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the divider + * @mshift: shift to the numerator bit field + * @mwidth: width of the numerator bit field + * @nshift: shift to the denominator bit field + * @nwidth: width of the denominator bit field + * @lock: register lock + * + * Clock with adjustable fractional divider affecting its output frequency. + */ + +struct clk_fractional_divider { + struct clk_hw hw; + void __iomem *reg; + u8 mshift; + u32 mmask; + u8 nshift; + u32 nmask; + u8 flags; + spinlock_t *lock; +}; + +extern const struct clk_ops clk_fractional_divider_ops; +struct clk *clk_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); + /*** * struct clk_composite - aggregate clock of mux, divider and gate clocks * -- cgit v1.2.3 From e3a2b3f931f59d5284abd13faf8bded726884ffd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 20 May 2014 11:49:02 -0600 Subject: blk-mq: allow changing of queue depth through sysfs For request_fn based devices, the block layer exports a 'nr_requests' file through sysfs to allow adjusting of queue depth on the fly. Currently this returns -EINVAL for blk-mq, since it's not wired up. Wire this up for blk-mq, so that it now also allows dynamic adjustments of the allowed queue depth for any given block device managed by blk-mq.
Signed-off-by: Jens Axboe --- block/blk-core.c | 41 ++++++++++++++++++++++++++ block/blk-mq-tag.c | 80 +++++++++++++++++++++++++++++++++++--------------- block/blk-mq-tag.h | 1 + block/blk-mq.c | 22 ++++++++++++++ block/blk-mq.h | 1 + block/blk-sysfs.c | 45 ++++++---------------------- block/blk.h | 2 ++ include/linux/blk-mq.h | 2 +- 8 files changed, 134 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index a6bd3e702201..fe81e19099a1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -848,6 +848,47 @@ static void freed_request(struct request_list *rl, unsigned int flags) __freed_request(rl, sync ^ 1); } +int blk_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct request_list *rl; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + + spin_unlock_irq(q->queue_lock); + return 0; +} + /* * Determine if elevator data should be initialized when allocating the * request associated with @bio. diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index e6b3fbae9862..f6dea968b710 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -57,23 +57,13 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) } /* - * If a previously busy queue goes inactive, potential waiters could now - * be allowed to queue. Wake them up and check. + * Wakeup all potentially sleeping on normal (non-reserved) tags */ -void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) { - struct blk_mq_tags *tags = hctx->tags; struct blk_mq_bitmap_tags *bt; int i, wake_index; - if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - return; - - atomic_dec(&tags->active_queues); - - /* - * Will only throttle depth on non-reserved tags - */ bt = &tags->bitmap_tags; wake_index = bt->wake_index; for (i = 0; i < BT_WAIT_QUEUES; i++) { @@ -86,6 +76,22 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) } } +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. + */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + blk_mq_tag_wakeup_all(tags); +} + /* * For shared tag users, we track the number of currently active users * and attempt to provide a fair share of the tag depth for each of them. 
@@ -408,6 +414,28 @@ static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) return bt->depth - used; } +static void bt_update_count(struct blk_mq_bitmap_tags *bt, + unsigned int depth) +{ + unsigned int tags_per_word = 1U << bt->bits_per_word; + unsigned int map_depth = depth; + + if (depth) { + int i; + + for (i = 0; i < bt->map_nr; i++) { + bt->map[i].depth = min(map_depth, tags_per_word); + map_depth -= bt->map[i].depth; + } + } + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / 4) + bt->wake_cnt = max(1U, depth / 4); + + bt->depth = depth; +} + static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, int node, bool reserved) { @@ -420,7 +448,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, * condition. */ if (depth) { - unsigned int nr, i, map_depth, tags_per_word; + unsigned int nr, tags_per_word; tags_per_word = (1 << bt->bits_per_word); @@ -444,11 +472,6 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, return -ENOMEM; bt->map_nr = nr; - map_depth = depth; - for (i = 0; i < nr; i++) { - bt->map[i].depth = min(map_depth, tags_per_word); - map_depth -= tags_per_word; - } } bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); @@ -460,11 +483,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, for (i = 0; i < BT_WAIT_QUEUES; i++) init_waitqueue_head(&bt->bs[i].wait); - bt->wake_cnt = BT_WAIT_BATCH; - if (bt->wake_cnt > depth / 4) - bt->wake_cnt = max(1U, depth / 4); - - bt->depth = depth; + bt_update_count(bt, depth); return 0; } @@ -525,6 +544,21 @@ void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) *tag = prandom_u32() % depth; } +int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) +{ + tdepth -= tags->nr_reserved_tags; + if (tdepth > tags->nr_tags) + return -EINVAL; + + /* + * Don't need (or can't) update reserved tags here, they remain + * static and should never need resizing. 
+ */ + bt_update_count(&tags->bitmap_tags, tdepth); + blk_mq_tag_wakeup_all(tags); + return 0; +} + ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e144f68ec45f..e7ff5ceeeb97 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -55,6 +55,7 @@ extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); +extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index 0fbef7e9bef1..7b71ab1b1536 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1789,6 +1789,28 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) } EXPORT_SYMBOL(blk_mq_free_tag_set); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct blk_mq_tag_set *set = q->tag_set; + struct blk_mq_hw_ctx *hctx; + int i, ret; + + if (!set || nr > set->queue_depth) + return -EINVAL; + + ret = 0; + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_tag_update_depth(hctx->tags, nr); + if (ret) + break; + } + + if (!ret) + q->nr_requests = nr; + + return ret; +} + void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 5e5a378962b7..7db4fe4bd002 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -32,6 +32,7 @@ void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); void blk_mq_clone_flush_request(struct request *flush_rq, struct request *orig_rq); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); /* * CPU hotplug helpers diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 7500f876dae4..4d6811ac13fd 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl; unsigned long nr; - int ret; + int ret, err; - if (!q->request_fn) + if (!q->request_fn && !q->mq_ops) return -EINVAL; ret = queue_var_store(&nr, page, count); @@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) if (nr < BLKDEV_MIN_RQ) nr = BLKDEV_MIN_RQ; - spin_lock_irq(q->queue_lock); - q->nr_requests = nr; - blk_queue_congestion_threshold(q); - - /* congestion isn't cgroup aware and follows root blkcg for now */ - rl = &q->root_rl; - - if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_SYNC); - else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_SYNC); - - if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_ASYNC); - else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_ASYNC); - - blk_queue_for_each_rl(rl, q) { - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_SYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); - } - - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_ASYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_ASYNC); - 
wake_up(&rl->wait[BLK_RW_ASYNC]); - } - } + if (q->request_fn) + err = blk_update_nr_requests(q, nr); + else + err = blk_mq_update_nr_requests(q, nr); + + if (err) + return err; - spin_unlock_irq(q->queue_lock); return ret; } diff --git a/block/blk.h b/block/blk.h index 95cab70000e3..45385e9abf6f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -188,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) return q->nr_congestion_off; } +extern int blk_update_nr_requests(struct request_queue *, unsigned int); + /* * Contribute to IO statistics IFF: * diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a06ca7b5ea05..f45424453338 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -63,7 +63,7 @@ struct blk_mq_hw_ctx { struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; - unsigned int queue_depth; + unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; unsigned int cmd_size; /* per-request extra data */ int numa_node; -- cgit v1.2.3 From 78d683e838a60ec4ba4591cca4364cba84a9e626 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 19 May 2014 15:58:32 -0700 Subject: mm, fs: Add vm_ops->name as an alternative to arch_vma_name arch_vma_name sucks. It's a silly hack, and it's annoying to implement correctly. In fact, AFAICS, even the straightforward x86 implementation is incorrect (I suspect that it breaks if the vdso mapping is split or gets remapped). This adds a new vm_ops->name operation that can replace it. The followup patches will remove all uses of arch_vma_name on x86, fixing a couple of annoyances in the process. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/2eee21791bb36a0a408c5c2bdb382a9e6a41ca4a.1400538962.git.luto@amacapital.net Signed-off-by: H. Peter Anvin --- fs/binfmt_elf.c | 8 ++++++++ fs/proc/task_mmu.c | 6 ++++++ include/linux/mm.h | 6 ++++++ 3 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index aa3cb626671e..df9ea4186d75 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1108,6 +1108,14 @@ static bool always_dump_vma(struct vm_area_struct *vma) /* Any vsyscall mappings? */ if (vma == get_gate_vma(vma->vm_mm)) return true; + + /* + * Assume that all vmas with a .name op should always be dumped. + * If this changes, a new vm_ops field can easily be added. + */ + if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma)) + return true; + /* * arch_vma_name() returns non-NULL for special architecture mappings, * such as vDSO sections. diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 442177b1119a..9b2f5d62ce63 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -300,6 +300,12 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) goto done; } + if (vma->vm_ops && vma->vm_ops->name) { + name = vma->vm_ops->name(vma); + if (name) + goto done; + } + name = arch_vma_name(vma); if (!name) { pid_t tid; diff --git a/include/linux/mm.h b/include/linux/mm.h index bf9811e1321a..63f8d4efe303 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -239,6 +239,12 @@ struct vm_operations_struct { */ int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); + + /* Called by the /proc/PID/maps code to ask the vma whether it + * has a special name. Returning non-NULL will also cause this + * vma to be dumped unconditionally. 
*/ + const char *(*name)(struct vm_area_struct *vma); + #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy -- cgit v1.2.3 From a62c34bd2a8a3f159945becd57401e478818d51c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 19 May 2014 15:58:33 -0700 Subject: x86, mm: Improve _install_special_mapping and fix x86 vdso naming Using arch_vma_name to give special mappings a name is awkward. x86 currently implements it by comparing the start address of the vma to the expected address of the vdso. This requires tracking the start address of special mappings and is probably buggy if a special vma is split or moved. Improve _install_special_mapping to just name the vma directly. Use it to give the x86 vvar area a name, which should make CRIU's life easier. As a side effect, the vvar area will show up in core dumps. This could be considered weird and is fixable. [hpa: I say we accept this as-is but be prepared to deal with knocking out the vvars from core dumps if this becomes a problem.] Cc: Cyrill Gorcunov Cc: Pavel Emelyanov Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/276b39b6b645fb11e345457b503f17b83c2c6fd0.1400538962.git.luto@amacapital.net Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/vdso.h | 6 ++- arch/x86/mm/init_64.c | 3 -- arch/x86/vdso/vdso2c.h | 5 ++- arch/x86/vdso/vdso32-setup.c | 7 ---- arch/x86/vdso/vma.c | 25 ++++++++----- include/linux/mm.h | 4 +- include/linux/mm_types.h | 6 +++ mm/mmap.c | 89 +++++++++++++++++++++++++++++--------------- 8 files changed, 94 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index d0a2c909c72d..30be253dd283 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h @@ -7,10 +7,14 @@ #ifndef __ASSEMBLER__ +#include + struct vdso_image { void *data; unsigned long size; /* Always a multiple of PAGE_SIZE */ - struct page **pages; /* Big enough for data/size page pointers */ + + /* text_mapping.pages is big enough for data/size page pointers */ + struct vm_special_mapping text_mapping; unsigned long alt, alt_len; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6f881842116c..9deb59b0baea 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1223,9 +1223,6 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == - (long __force)vma->vm_mm->context.vdso) - return "[vdso]"; if (vma == &gate_vma) return "[vsyscall]"; return NULL; diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h index ed2e894e89ab..3dcc61e796e9 100644 --- a/arch/x86/vdso/vdso2c.h +++ b/arch/x86/vdso/vdso2c.h @@ -136,7 +136,10 @@ static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) fprintf(outfile, "const struct vdso_image %s = {\n", name); fprintf(outfile, "\t.data = raw_data,\n"); fprintf(outfile, "\t.size = %lu,\n", data_size); - fprintf(outfile, "\t.pages = pages,\n"); + fprintf(outfile, "\t.text_mapping = {\n"); + fprintf(outfile, "\t\t.name = \"[vdso]\",\n"); + fprintf(outfile, "\t\t.pages = pages,\n"); + fprintf(outfile, "\t},\n"); if (alt_sec) { fprintf(outfile, "\t.alt = %lu,\n", (unsigned long)alt_sec->sh_offset); diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index c3ed708e50f4..e4f7781ee162 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -119,13 +119,6 @@ __initcall(ia32_binfmt_init); #else /* CONFIG_X86_32 */ -const 
char *arch_vma_name(struct vm_area_struct *vma) -{ - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) - return "[vdso]"; - return NULL; -} - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return NULL; diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 8ad0081df7a8..e1513c47872a 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -30,7 +30,8 @@ void __init init_vdso_image(const struct vdso_image *image) BUG_ON(image->size % PAGE_SIZE != 0); for (i = 0; i < npages; i++) - image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE); + image->text_mapping.pages[i] = + virt_to_page(image->data + i*PAGE_SIZE); apply_alternatives((struct alt_instr *)(image->data + image->alt), (struct alt_instr *)(image->data + image->alt + @@ -91,6 +92,10 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr) unsigned long addr; int ret = 0; static struct page *no_pages[] = {NULL}; + static struct vm_special_mapping vvar_mapping = { + .name = "[vvar]", + .pages = no_pages, + }; if (calculate_addr) { addr = vdso_addr(current->mm->start_stack, @@ -112,21 +117,23 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr) /* * MAYWRITE to allow gdb to COW and set breakpoints */ - ret = install_special_mapping(mm, - addr, - image->size, - VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - image->pages); + vma = _install_special_mapping(mm, + addr, + image->size, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &image->text_mapping); - if (ret) + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto up_fail; + } vma = _install_special_mapping(mm, addr + image->size, image->sym_end_mapping - image->size, VM_READ, - no_pages); + &vvar_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); diff --git a/include/linux/mm.h b/include/linux/mm.h index 63f8d4efe303..05aab09803e6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1782,7 +1782,9 @@ extern struct file *get_mm_exe_file(struct mm_struct *mm); extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, - unsigned long flags, struct page **pages); + unsigned long flags, + const struct vm_special_mapping *spec); +/* This is an obsolete alternative to _install_special_mapping. */ extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8967e20cbe57..22c6f4e16d10 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -510,4 +510,10 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm) } #endif +struct vm_special_mapping +{ + const char *name; + struct page **pages; +}; + #endif /* _LINUX_MM_TYPES_H */ diff --git a/mm/mmap.c b/mm/mmap.c index b1202cf81f4b..52bbc9514d9d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2872,6 +2872,31 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) return 1; } +static int special_mapping_fault(struct vm_area_struct *vma, + struct vm_fault *vmf); + +/* + * Having a close hook prevents vma merging regardless of flags. 
+ */ +static void special_mapping_close(struct vm_area_struct *vma) +{ +} + +static const char *special_mapping_name(struct vm_area_struct *vma) +{ + return ((struct vm_special_mapping *)vma->vm_private_data)->name; +} + +static const struct vm_operations_struct special_mapping_vmops = { + .close = special_mapping_close, + .fault = special_mapping_fault, + .name = special_mapping_name, +}; + +static const struct vm_operations_struct legacy_special_mapping_vmops = { + .close = special_mapping_close, + .fault = special_mapping_fault, +}; static int special_mapping_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -2887,7 +2912,13 @@ static int special_mapping_fault(struct vm_area_struct *vma, */ pgoff = vmf->pgoff - vma->vm_pgoff; - for (pages = vma->vm_private_data; pgoff && *pages; ++pages) + if (vma->vm_ops == &legacy_special_mapping_vmops) + pages = vma->vm_private_data; + else + pages = ((struct vm_special_mapping *)vma->vm_private_data)-> + pages; + + for (; pgoff && *pages; ++pages) pgoff--; if (*pages) { @@ -2900,30 +2931,11 @@ static int special_mapping_fault(struct vm_area_struct *vma, return VM_FAULT_SIGBUS; } -/* - * Having a close hook prevents vma merging regardless of flags. - */ -static void special_mapping_close(struct vm_area_struct *vma) -{ -} - -static const struct vm_operations_struct special_mapping_vmops = { - .close = special_mapping_close, - .fault = special_mapping_fault, -}; - -/* - * Called with mm->mmap_sem held for writing. - * Insert a new vma covering the given region, with the given flags. - * Its pages are supplied by the given array of struct page *. - * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. - * The region past the last page supplied will always produce SIGBUS. - * The array pointer and the pages it points to are assumed to stay alive - * for as long as this mapping might exist. - */ -struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, - unsigned long addr, unsigned long len, - unsigned long vm_flags, struct page **pages) +static struct vm_area_struct *__install_special_mapping( + struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long vm_flags, const struct vm_operations_struct *ops, + void *priv) { int ret; struct vm_area_struct *vma; @@ -2940,8 +2952,8 @@ struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - vma->vm_ops = &special_mapping_vmops; - vma->vm_private_data = pages; + vma->vm_ops = ops; + vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) @@ -2958,12 +2970,31 @@ out: return ERR_PTR(ret); } +/* + * Called with mm->mmap_sem held for writing. + * Insert a new vma covering the given region, with the given flags. + * Its pages are supplied by the given array of struct page *. + * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. + * The region past the last page supplied will always produce SIGBUS. + * The array pointer and the pages it points to are assumed to stay alive + * for as long as this mapping might exist. 
+ */ +struct vm_area_struct *_install_special_mapping( + struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long vm_flags, const struct vm_special_mapping *spec) +{ + return __install_special_mapping(mm, addr, len, vm_flags, + &special_mapping_vmops, (void *)spec); +} + int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { - struct vm_area_struct *vma = _install_special_mapping(mm, - addr, len, vm_flags, pages); + struct vm_area_struct *vma = __install_special_mapping( + mm, addr, len, vm_flags, &legacy_special_mapping_vmops, + (void *)pages); if (IS_ERR(vma)) return PTR_ERR(vma); -- cgit v1.2.3 From 9dd3107576c4bbd40e1c2c8b24d560abf9a7b991 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 8 May 2014 16:06:17 -0500 Subject: of: align RESERVEDMEM_OF_DECLARE function callbacks to other callbacks All the parameters for RESERVEDMEM_OF_DECLARE function callbacks are members of struct reserved_mem, so just pass the struct ptr to callback functions so the function callback is more in line with other OF match table callbacks. Acked-by: Marek Szyprowski Acked-by: Grant Likely Signed-off-by: Rob Herring --- drivers/of/of_reserved_mem.c | 2 +- include/linux/of_reserved_mem.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index e420eb52e5c9..632aae861375 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -188,7 +188,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) continue; - if (initfn(rmem, rmem->fdt_node, rmem->name) == 0) { + if (initfn(rmem) == 0) { pr_info("Reserved memory: initialized node %s, compatible id %s\n", rmem->name, compat); return 0; diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 9b1fbb7f29fc..4c81b84e95ff 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -21,8 +21,8 @@ struct reserved_mem_ops { struct device *dev); }; -typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem, - unsigned long node, const char *uname); +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); + #ifdef CONFIG_OF_RESERVED_MEM void fdt_init_reserved_mem(void); -- cgit v1.2.3 From 54196ccbe0ba1f268a646059473313589db35b01 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 8 May 2014 16:09:24 -0500 Subject: of: consolidate linker section OF match table declarations We now have several OF match tables using linker sections that are nearly the same definition. The only variation is the callback function prototype. Create a common define for creating linker section OF match table entries which each table declaration can use. 
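For illustration, existing users keep their per-subsystem macros; a hypothetical clock driver (the "foo"/"acme" names are invented) would still write the following, which now simply expands through the common OF_DECLARE_1 helper added in the of.h hunk below:

	#include <linux/clk-provider.h>
	#include <linux/of.h>

	/* Callback type matches of_init_fn_1: void (*)(struct device_node *). */
	static void __init foo_clk_init(struct device_node *np)
	{
		/* parse 'np' and register the clocks it describes */
	}
	CLK_OF_DECLARE(foo_clk, "acme,foo-clk", foo_clk_init);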
Acked-by: Grant Likely Signed-off-by: Rob Herring --- drivers/clocksource/clksrc-of.c | 2 +- drivers/irqchip/irqchip.h | 7 +++---- include/linux/clk-provider.h | 5 +---- include/linux/clocksource.h | 16 +++------------- include/linux/of.h | 22 ++++++++++++++++++++++ include/linux/of_reserved_mem.h | 18 ++---------------- 6 files changed, 32 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index ae2e4278c42a..0093a8e49e14 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -27,7 +27,7 @@ void __init clocksource_of_init(void) { struct device_node *np; const struct of_device_id *match; - clocksource_of_init_fn init_func; + of_init_fn_1 init_func; unsigned clocksources = 0; for_each_matching_node_and_match(np, __clksrc_of_table, &match) { diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h index e445ba2d6add..0f6486d4f1b0 100644 --- a/drivers/irqchip/irqchip.h +++ b/drivers/irqchip/irqchip.h @@ -11,6 +11,8 @@ #ifndef _IRQCHIP_H #define _IRQCHIP_H +#include + /* * This macro must be used by the different irqchip drivers to declare * the association between their DT compatible string and their @@ -21,9 +23,6 @@ * @compstr: compatible string of the irqchip driver * @fn: initialization function */ -#define IRQCHIP_DECLARE(name,compstr,fn) \ - static const struct of_device_id irqchip_of_match_##name \ - __used __section(__irqchip_of_table) \ - = { .compatible = compstr, .data = fn } +#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) #endif diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 511917416fb0..a6e4008a0bf7 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -498,10 +498,7 @@ struct clk_onecell_data { extern struct of_device_id __clk_of_table; -#define CLK_OF_DECLARE(name, compat, fn) \ - static const struct of_device_id __clk_of_table_##name \ - __used __section(__clk_of_table) \ - = { .compatible = compat, .data = fn }; +#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 67301a405712..a16b497d5159 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -339,23 +339,13 @@ extern int clocksource_mmio_init(void __iomem *, const char *, extern int clocksource_i8253_init(void); -struct device_node; -typedef void(*clocksource_of_init_fn)(struct device_node *); +#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ + OF_DECLARE_1(clksrc, name, compat, fn) + #ifdef CONFIG_CLKSRC_OF extern void clocksource_of_init(void); - -#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - static const struct of_device_id __clksrc_of_table_##name \ - __used __section(__clksrc_of_table) \ - = { .compatible = compat, \ - .data = (fn == (clocksource_of_init_fn)NULL) ? fn : fn } #else static inline void clocksource_of_init(void) {} -#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - static const struct of_device_id __clksrc_of_table_##name \ - __attribute__((unused)) \ - = { .compatible = compat, \ - .data = (fn == (clocksource_of_init_fn)NULL) ? 
fn : fn } #endif #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 3bad8d106e0e..bf65335b4d05 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -757,4 +757,26 @@ static inline int of_get_available_child_count(const struct device_node *np) return num; } +#ifdef CONFIG_OF +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __used __section(__##table##_of_table) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } +#else +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __attribute__((unused)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } +#endif + +typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); +typedef void (*of_init_fn_1)(struct device_node *); + +#define OF_DECLARE_1(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1) +#define OF_DECLARE_2(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_2) + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 4c81b84e95ff..4669ddfdd5af 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -23,31 +23,17 @@ struct reserved_mem_ops { typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); +#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ + _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) #ifdef CONFIG_OF_RESERVED_MEM void fdt_init_reserved_mem(void); void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size); - -#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ - static const struct of_device_id __reservedmem_of_table_##name \ - __used __section(__reservedmem_of_table) \ - = { .compatible = compat, \ - .data = (init == (reservedmem_of_init_fn)NULL) ? \ - init : init } - #else static inline void fdt_init_reserved_mem(void) { } static inline void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size) { } - -#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ - static const struct of_device_id __reservedmem_of_table_##name \ - __attribute__((unused)) \ - = { .compatible = compat, \ - .data = (init == (reservedmem_of_init_fn)NULL) ? \ - init : init } - #endif #endif /* __OF_RESERVED_MEM_H */ -- cgit v1.2.3 From b0b6abd34c1b508d4ac95dbc614f36c49d29e65a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 27 Mar 2014 08:06:16 -0500 Subject: serial: earlycon: add DT support This adds the infrastructure to generic earlycon for earlycon setup using DT. The actual setup is not enabled until a following commit to add the FDT parsing. 
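To make the intended use concrete, a hypothetical UART driver (all "foo"/"acme" names and the single TX register are invented) would register an OF early console roughly as follows, relying on of_setup_earlycon() below to fill in the port before invoking the setup hook:

	#include <linux/console.h>
	#include <linux/io.h>
	#include <linux/serial_core.h>

	static void foo_early_write(struct console *con, const char *s, unsigned int n)
	{
		struct earlycon_device *dev = con->data;

		while (n--)
			writeb(*s++, dev->port.membase);	/* made-up TX register */
	}

	static int __init foo_early_console_setup(struct earlycon_device *device,
						  const char *opt)
	{
		if (!device->port.membase)
			return -ENODEV;

		device->con->write = foo_early_write;
		return 0;
	}
	OF_EARLYCON_DECLARE(foo, "acme,foo-uart", foo_early_console_setup);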
Signed-off-by: Rob Herring Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: Arnd Bergmann Acked-by: Grant Likely --- drivers/tty/serial/earlycon.c | 28 ++++++++++++++++++++++++++++ include/asm-generic/vmlinux.lds.h | 4 +++- include/linux/serial_core.h | 6 ++++++ 3 files changed, 37 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index c92e83088adb..5131b5ee6164 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #ifdef CONFIG_FIX_EARLYCON_MEM #include @@ -32,6 +34,9 @@ static struct earlycon_device early_console_dev = { .con = &early_con, }; +static const struct of_device_id __earlycon_of_table_sentinel + __used __section(__earlycon_of_table_end); + static void __iomem * __init earlycon_map(unsigned long paddr, size_t size) { void __iomem *base; @@ -142,3 +147,26 @@ int __init setup_earlycon(char *buf, const char *match, register_console(early_console_dev.con); return 0; } + +int __init of_setup_earlycon(unsigned long addr, + int (*setup)(struct earlycon_device *, const char *)) +{ + int err; + struct uart_port *port = &early_console_dev.port; + + port->iotype = UPIO_MEM; + port->mapbase = addr; + port->uartclk = BASE_BAUD * 16; + port->membase = earlycon_map(addr, SZ_4K); + + early_console_dev.con->data = &early_console_dev; + err = setup(&early_console_dev, NULL); + if (err < 0) + return err; + if (!early_console_dev.con->write) + return -ENODEV; + + + register_console(early_console_dev.con); + return 0; +} diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b9404f6590f1..d647637cd699 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -155,6 +155,7 @@ #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) +#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) #define KERNEL_DTB() \ STRUCT_ALIGN(); \ @@ -483,7 +484,8 @@ CLKSRC_OF_TABLES() \ CPU_METHOD_OF_TABLES() \ KERNEL_DTB() \ - IRQCHIP_OF_MATCH_TABLE() + IRQCHIP_OF_MATCH_TABLE() \ + EARLYCON_OF_TABLES() #define INIT_TEXT \ *(.init.text) \ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 7a15b5b24c0b..5bbb809ee197 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -294,6 +294,9 @@ struct earlycon_device { int setup_earlycon(char *buf, const char *match, int (*setup)(struct earlycon_device *, const char *)); +extern int of_setup_earlycon(unsigned long addr, + int (*setup)(struct earlycon_device *, const char *)); + #define EARLYCON_DECLARE(name, func) \ static int __init name ## _setup_earlycon(char *buf) \ { \ @@ -301,6 +304,9 @@ static int __init name ## _setup_earlycon(char *buf) \ } \ early_param("earlycon", name ## _setup_earlycon); +#define OF_EARLYCON_DECLARE(name, compat, fn) \ + _OF_DECLARE(earlycon, name, compat, fn, void *) + struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); void uart_parse_options(char *options, int *baud, int *parity, int *bits, -- cgit v1.2.3 From e06e8b27082852bdab417af884241a4ed2037c73 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 27 Mar 2014 07:37:43 -0500 Subject: of/fdt: add FDT address translation support Copy u-boot's FDT address translation code from common/fdt_support. 
This code was originally based on the kernel's unflattened DT address parsing code. This commit can be reverted once relicensing of this code to GPLv2/BSD is done and it is added to libfdt. Signed-off-by: Rob Herring Acked-by: Grant Likely --- drivers/of/Makefile | 2 + drivers/of/fdt_address.c | 241 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/of_fdt.h | 1 + 3 files changed, 244 insertions(+) create mode 100644 drivers/of/fdt_address.c (limited to 'include/linux') diff --git a/drivers/of/Makefile b/drivers/of/Makefile index 9891232f999e..099b1fb00af4 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -1,5 +1,6 @@ obj-y = base.o device.o platform.o obj-$(CONFIG_OF_FLATTREE) += fdt.o +obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o obj-$(CONFIG_OF_PROMTREE) += pdt.o obj-$(CONFIG_OF_ADDRESS) += address.o obj-$(CONFIG_OF_IRQ) += irq.o @@ -12,3 +13,4 @@ obj-$(CONFIG_OF_MTD) += of_mtd.o obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt +CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c new file mode 100644 index 000000000000..8d3dc6fbdb7a --- /dev/null +++ b/drivers/of/fdt_address.c @@ -0,0 +1,241 @@ +/* + * FDT Address translation based on u-boot fdt_support.c which in turn was + * based on the kernel unflattened DT address translation code. + * + * (C) Copyright 2007 + * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com + * + * Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ */ +#include +#include +#include +#include +#include + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 +#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ + (ns) > 0) + +/* Debug utility */ +#ifdef DEBUG +static void __init of_dump_addr(const char *s, const __be32 *addr, int na) +{ + pr_debug("%s", s); + while(na--) + pr_cont(" %08x", *(addr++)); + pr_debug("\n"); +} +#else +static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { } +#endif + +/* Callbacks for bus specific translators */ +struct of_bus { + void (*count_cells)(const void *blob, int parentoffset, + int *addrc, int *sizec); + u64 (*map)(__be32 *addr, const __be32 *range, + int na, int ns, int pna); + int (*translate)(__be32 *addr, u64 offset, int na); +}; + +/* Default translator (generic bus) */ +static void __init fdt_bus_default_count_cells(const void *blob, int parentoffset, + int *addrc, int *sizec) +{ + const __be32 *prop; + + if (addrc) { + prop = fdt_getprop(blob, parentoffset, "#address-cells", NULL); + if (prop) + *addrc = be32_to_cpup(prop); + else + *addrc = dt_root_addr_cells; + } + + if (sizec) { + prop = fdt_getprop(blob, parentoffset, "#size-cells", NULL); + if (prop) + *sizec = be32_to_cpup(prop); + else + *sizec = dt_root_size_cells; + } +} + +static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range, + int na, int ns, int pna) +{ + u64 cp, s, da; + + cp = of_read_number(range, na); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr, na); + + pr_debug("FDT: default map, cp=%llx, s=%llx, da=%llx\n", + cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int __init fdt_bus_default_translate(__be32 *addr, u64 offset, int na) +{ + u64 a = of_read_number(addr, na); + memset(addr, 0, na * 4); + a += offset; + if (na > 1) + addr[na - 2] = cpu_to_fdt32(a >> 32); + addr[na - 1] = cpu_to_fdt32(a & 0xffffffffu); + + return 0; +} + +/* Array of bus specific translators */ +static const struct of_bus of_busses[] __initconst = { + /* Default */ + { + .count_cells = fdt_bus_default_count_cells, + .map = fdt_bus_default_map, + .translate = fdt_bus_default_translate, + }, +}; + +static int __init fdt_translate_one(const void *blob, int parent, + const struct of_bus *bus, + const struct of_bus *pbus, __be32 *addr, + int na, int ns, int pna, const char *rprop) +{ + const __be32 *ranges; + int rlen; + int rone; + u64 offset = OF_BAD_ADDR; + + ranges = fdt_getprop(blob, parent, rprop, &rlen); + if (!ranges) + return 1; + if (rlen == 0) { + offset = of_read_number(addr, na); + memset(addr, 0, pna * 4); + pr_debug("FDT: empty ranges, 1:1 translation\n"); + goto finish; + } + + pr_debug("FDT: walking ranges...\n"); + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + offset = bus->map(addr, ranges, na, ns, pna); + if (offset != OF_BAD_ADDR) + break; + } + if (offset == OF_BAD_ADDR) { + pr_debug("FDT: not found !\n"); + return 1; + } + memcpy(addr, ranges + na, 4 * pna); + + finish: + of_dump_addr("FDT: parent translation for:", addr, pna); + pr_debug("FDT: with offset: %llx\n", offset); + + /* Translate it into parent bus space */ + return pbus->translate(addr, offset, pna); +} + +/* + * Translate an address from the device-tree into a CPU physical address, + * this walks up the tree and applies the various bus mappings on the + * way. 
+ * + * Note: We consider that crossing any level with #size-cells == 0 to mean + * that translation is impossible (that is we are not dealing with a value + * that can be mapped to a cpu physical address). This is not really specified + * that way, but this is traditionally the way IBM at least do things + */ +u64 __init fdt_translate_address(const void *blob, int node_offset) +{ + int parent, len; + const struct of_bus *bus, *pbus; + const __be32 *reg; + __be32 addr[OF_MAX_ADDR_CELLS]; + int na, ns, pna, pns; + u64 result = OF_BAD_ADDR; + + pr_debug("FDT: ** translation for device %s **\n", + fdt_get_name(blob, node_offset, NULL)); + + reg = fdt_getprop(blob, node_offset, "reg", &len); + if (!reg) { + pr_err("FDT: warning: device tree node '%s' has no address.\n", + fdt_get_name(blob, node_offset, NULL)); + goto bail; + } + + /* Get parent & match bus type */ + parent = fdt_parent_offset(blob, node_offset); + if (parent < 0) + goto bail; + bus = &of_busses[0]; + + /* Cound address cells & copy address locally */ + bus->count_cells(blob, parent, &na, &ns); + if (!OF_CHECK_COUNTS(na, ns)) { + pr_err("FDT: Bad cell count for %s\n", + fdt_get_name(blob, node_offset, NULL)); + goto bail; + } + memcpy(addr, reg, na * 4); + + pr_debug("FDT: bus (na=%d, ns=%d) on %s\n", + na, ns, fdt_get_name(blob, parent, NULL)); + of_dump_addr("OF: translating address:", addr, na); + + /* Translate */ + for (;;) { + /* Switch to parent bus */ + node_offset = parent; + parent = fdt_parent_offset(blob, node_offset); + + /* If root, we have finished */ + if (parent < 0) { + pr_debug("FDT: reached root node\n"); + result = of_read_number(addr, na); + break; + } + + /* Get new parent bus and counts */ + pbus = &of_busses[0]; + pbus->count_cells(blob, parent, &pna, &pns); + if (!OF_CHECK_COUNTS(pna, pns)) { + pr_err("FDT: Bad cell count for %s\n", + fdt_get_name(blob, node_offset, NULL)); + break; + } + + pr_debug("FDT: parent bus (na=%d, ns=%d) on %s\n", + pna, pns, fdt_get_name(blob, parent, NULL)); + + /* Apply bus translation */ + if (fdt_translate_one(blob, node_offset, bus, pbus, + addr, na, ns, pna, "ranges")) + break; + + /* Complete the move up one level */ + na = pna; + ns = pns; + bus = pbus; + + of_dump_addr("FDT: one level translation:", addr, na); + } + bail: + return result; +} diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 5c0ab057eecf..05117899fcb4 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -83,6 +83,7 @@ extern void unflatten_device_tree(void); extern void unflatten_and_copy_device_tree(void); extern void early_init_devtree(void *); extern void early_get_first_memblock_info(void *, phys_addr_t *); +extern u64 fdt_translate_address(const void *blob, int node_offset); #else /* CONFIG_OF_FLATTREE */ static inline void early_init_fdt_scan_reserved_mem(void) {} static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } -- cgit v1.2.3 From 77f2ea2f8d0833f9e976368481fb9a0775acf9e7 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 30 Apr 2014 11:20:53 -0600 Subject: DMA-API: Clarify physical/bus address distinction The DMA-API documentation sometimes refers to "physical addresses" when it really means "bus addresses." Sometimes these are identical, but they may be different if the bridge leading to the bus performs address translation. Update the documentation to use "bus address" when appropriate. Also, consistently capitalize "DMA", use parens with function names, use dev_printk() in examples, and reword a few sections for clarity. 
No functional change; documentation changes only. Signed-off-by: Bjorn Helgaas Acked-by: Greg Kroah-Hartman Acked-by: Arnd Bergmann Acked-by: James Bottomley Acked-by: Randy Dunlap --- Documentation/DMA-API-HOWTO.txt | 192 +++++++++++++++++++++++++--------------- Documentation/DMA-API.txt | 139 +++++++++++++++-------------- Documentation/DMA-ISA-LPC.txt | 4 +- include/linux/dma-mapping.h | 6 ++ include/linux/types.h | 1 + 5 files changed, 204 insertions(+), 138 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 5e983031cc11..fd3727b94ac2 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -9,16 +9,76 @@ This is a guide to device driver writers on how to use the DMA API with example pseudo-code. For a concise description of the API, see DMA-API.txt. -Most of the 64bit platforms have special hardware that translates bus -addresses (DMA addresses) into physical addresses. This is similar to -how page tables and/or a TLB translates virtual addresses to physical -addresses on a CPU. This is needed so that e.g. PCI devices can -access with a Single Address Cycle (32bit DMA address) any page in the -64bit physical address space. Previously in Linux those 64bit -platforms had to set artificial limits on the maximum RAM size in the -system, so that the virt_to_bus() static scheme works (the DMA address -translation tables were simply filled on bootup to map each bus -address to the physical page __pa(bus_to_virt())). + CPU and DMA addresses + +There are several kinds of addresses involved in the DMA API, and it's +important to understand the differences. + +The kernel normally uses virtual addresses. Any address returned by +kmalloc(), vmalloc(), and similar interfaces is a virtual address and can +be stored in a "void *". + +The virtual memory system (TLB, page tables, etc.) translates virtual +addresses to CPU physical addresses, which are stored as "phys_addr_t" or +"resource_size_t". The kernel manages device resources like registers as +physical addresses. These are the addresses in /proc/iomem. The physical +address is not directly useful to a driver; it must use ioremap() to map +the space and produce a virtual address. + +I/O devices use a third kind of address: a "bus address" or "DMA address". +If a device has registers at an MMIO address, or if it performs DMA to read +or write system memory, the addresses used by the device are bus addresses. +In some systems, bus addresses are identical to CPU physical addresses, but +in general they are not. IOMMUs and host bridges can produce arbitrary +mappings between physical and bus addresses. + +Here's a picture and some examples: + + CPU CPU Bus + Virtual Physical Address + Address Address Space + Space Space + + +-------+ +------+ +------+ + | | |MMIO | Offset | | + | | Virtual |Space | applied | | + C +-------+ --------> B +------+ ----------> +------+ A + | | mapping | | by host | | + +-----+ | | | | bridge | | +--------+ + | | | | +------+ | | | | + | CPU | | | | RAM | | | | Device | + | | | | | | | | | | + +-----+ +-------+ +------+ +------+ +--------+ + | | Virtual |Buffer| Mapping | | + X +-------+ --------> Y +------+ <---------- +------+ Z + | | mapping | RAM | by IOMMU + | | | | + | | | | + +-------+ +------+ + +During the enumeration process, the kernel learns about I/O devices and +their MMIO space and the host bridges that connect them to the system. 
For +example, if a PCI device has a BAR, the kernel reads the bus address (A) +from the BAR and converts it to a CPU physical address (B). The address B +is stored in a struct resource and usually exposed via /proc/iomem. When a +driver claims a device, it typically uses ioremap() to map physical address +B at a virtual address (C). It can then use, e.g., ioread32(C), to access +the device registers at bus address A. + +If the device supports DMA, the driver sets up a buffer using kmalloc() or +a similar interface, which returns a virtual address (X). The virtual +memory system maps X to a physical address (Y) in system RAM. The driver +can use virtual address X to access the buffer, but the device itself +cannot because DMA doesn't go through the CPU virtual memory system. + +In some simple systems, the device can do DMA directly to physical address +Y. But in many others, there is IOMMU hardware that translates bus +addresses to physical addresses, e.g., it translates Z to Y. This is part +of the reason for the DMA API: the driver can give a virtual address X to +an interface like dma_map_single(), which sets up any required IOMMU +mapping and returns the bus address Z. The driver then tells the device to +do DMA to Z, and the IOMMU maps it to the buffer at address Y in system +RAM. So that Linux can use the dynamic DMA mapping, it needs some help from the drivers, namely it has to take into account that DMA addresses should be @@ -29,17 +89,17 @@ The following API will work of course even on platforms where no such hardware exists. Note that the DMA API works with any bus independent of the underlying -microprocessor architecture. You should use the DMA API rather than -the bus specific DMA API (e.g. pci_dma_*). +microprocessor architecture. You should use the DMA API rather than the +bus-specific DMA API, i.e., use the dma_map_*() interfaces rather than the +pci_map_*() interfaces. First of all, you should make sure #include -is in your driver. This file will obtain for you the definition of the -dma_addr_t (which can hold any valid DMA address for the platform) -type which should be used everywhere you hold a DMA (bus) address -returned from the DMA mapping functions. +is in your driver, which provides the definition of dma_addr_t. This type +can hold any valid DMA or bus address for the platform and should be used +everywhere you hold a DMA address returned from the DMA mapping functions. What memory is DMA'able? @@ -123,9 +183,9 @@ Here, dev is a pointer to the device struct of your device, and mask is a bit mask describing which bits of an address your device supports. It returns zero if your card can perform DMA properly on the machine given the address mask you provided. In general, the -device struct of your device is embedded in the bus specific device -struct of your device. For example, a pointer to the device struct of -your PCI device is pdev->dev (pdev is a pointer to the PCI device +device struct of your device is embedded in the bus-specific device +struct of your device. For example, &pdev->dev is a pointer to the +device struct of a PCI device (pdev is a pointer to the PCI device struct of your device). If it returns non-zero, your device cannot perform DMA properly on @@ -147,8 +207,7 @@ exactly why. 
The standard 32-bit addressing device would do something like this: if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } @@ -170,8 +229,7 @@ all 64-bits when accessing streaming DMA: } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) { using_dac = 0; } else { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } @@ -187,8 +245,7 @@ the case would look like this: using_dac = 0; consistent_using_dac = 0; } else { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } @@ -201,8 +258,7 @@ Finally, if your device can only drive the low 24-bits of address you might do something like: if (dma_set_mask(dev, DMA_BIT_MASK(24))) { - printk(KERN_WARNING - "mydev: 24-bit DMA addressing not available.\n"); + dev_warn(dev, "mydev: 24-bit DMA addressing not available\n"); goto ignore_this_device; } @@ -232,14 +288,14 @@ Here is pseudo-code showing how this might be done: card->playback_enabled = 1; } else { card->playback_enabled = 0; - printk(KERN_WARNING "%s: Playback disabled due to DMA limitations.\n", + dev_warn(dev, "%s: Playback disabled due to DMA limitations\n", card->name); } if (!dma_set_mask(dev, RECORD_ADDRESS_BITS)) { card->record_enabled = 1; } else { card->record_enabled = 0; - printk(KERN_WARNING "%s: Record disabled due to DMA limitations.\n", + dev_warn(dev, "%s: Record disabled due to DMA limitations\n", card->name); } @@ -331,7 +387,7 @@ context with the GFP_ATOMIC flag. Size is the length of the region you want to allocate, in bytes. This routine will allocate RAM for that region, so it acts similarly to -__get_free_pages (but takes size instead of a page order). If your +__get_free_pages() (but takes size instead of a page order). If your driver needs regions sized smaller than a page, you may prefer using the dma_pool interface, described below. @@ -343,11 +399,11 @@ the consistent DMA mask has been explicitly changed via dma_set_coherent_mask(). This is true of the dma_pool interface as well. -dma_alloc_coherent returns two values: the virtual address which you +dma_alloc_coherent() returns two values: the virtual address which you can use to access it from the CPU and dma_handle which you pass to the card. -The cpu return address and the DMA bus master address are both +The CPU virtual address and the DMA bus address are both guaranteed to be aligned to the smallest PAGE_SIZE order which is greater than or equal to the requested size. This invariant exists (for example) to guarantee that if you allocate a chunk @@ -359,13 +415,13 @@ To unmap and free such a DMA region, you call: dma_free_coherent(dev, size, cpu_addr, dma_handle); where dev, size are the same as in the above call and cpu_addr and -dma_handle are the values dma_alloc_coherent returned to you. +dma_handle are the values dma_alloc_coherent() returned to you. This function may not be called in interrupt context. If your driver needs lots of smaller memory regions, you can write -custom code to subdivide pages returned by dma_alloc_coherent, +custom code to subdivide pages returned by dma_alloc_coherent(), or you can use the dma_pool API to do that. A dma_pool is like -a kmem_cache, but it uses dma_alloc_coherent not __get_free_pages. 
+a kmem_cache, but it uses dma_alloc_coherent(), not __get_free_pages(). Also, it understands common hardware constraints for alignment, like queue heads needing to be aligned on N byte boundaries. @@ -381,29 +437,29 @@ type of data is "align" (which is expressed in bytes, and must be a power of two). If your device has no boundary crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated from this pool must not cross 4KByte boundaries (but at that time it may be better to -go for dma_alloc_coherent directly instead). +use dma_alloc_coherent() directly instead). -Allocate memory from a dma pool like this: +Allocate memory from a DMA pool like this: cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); flags are SLAB_KERNEL if blocking is permitted (not in_interrupt nor -holding SMP locks), SLAB_ATOMIC otherwise. Like dma_alloc_coherent, +holding SMP locks), SLAB_ATOMIC otherwise. Like dma_alloc_coherent(), this returns two values, cpu_addr and dma_handle. Free memory that was allocated from a dma_pool like this: dma_pool_free(pool, cpu_addr, dma_handle); -where pool is what you passed to dma_pool_alloc, and cpu_addr and -dma_handle are the values dma_pool_alloc returned. This function +where pool is what you passed to dma_pool_alloc(), and cpu_addr and +dma_handle are the values dma_pool_alloc() returned. This function may be called in interrupt context. Destroy a dma_pool by calling: dma_pool_destroy(pool); -Make sure you've called dma_pool_free for all memory allocated +Make sure you've called dma_pool_free() for all memory allocated from a pool before you destroy the pool. This function may not be called in interrupt context. @@ -418,7 +474,7 @@ one of the following values: DMA_FROM_DEVICE DMA_NONE -One should provide the exact DMA direction if you know it. +You should provide the exact DMA direction if you know it. DMA_TO_DEVICE means "from main memory to the device" DMA_FROM_DEVICE means "from the device to main memory" @@ -489,14 +545,14 @@ and to unmap it: dma_unmap_single(dev, dma_handle, size, direction); You should call dma_mapping_error() as dma_map_single() could fail and return -error. Not all dma implementations support dma_mapping_error() interface. +error. Not all DMA implementations support the dma_mapping_error() interface. However, it is a good practice to call dma_mapping_error() interface, which will invoke the generic mapping error check interface. Doing so will ensure -that the mapping code will work correctly on all dma implementations without +that the mapping code will work correctly on all DMA implementations without any dependency on the specifics of the underlying implementation. Using the returned address without checking for errors could result in failures ranging from panics to silent data corruption. A couple of examples of incorrect ways -to check for errors that make assumptions about the underlying dma +to check for errors that make assumptions about the underlying DMA implementation are as follows and these are applicable to dma_map_page() as well. @@ -516,12 +572,12 @@ Incorrect example 2: goto map_error; } -You should call dma_unmap_single when the DMA activity is finished, e.g. +You should call dma_unmap_single() when the DMA activity is finished, e.g., from the interrupt which told you that the DMA transfer is done. -Using cpu pointers like this for single mappings has a disadvantage, +Using cpu pointers like this for single mappings has a disadvantage: you cannot reference HIGHMEM memory in this way. 
Thus, there is a -map/unmap interface pair akin to dma_{map,unmap}_single. These +map/unmap interface pair akin to dma_{map,unmap}_single(). These interfaces deal with page/offset pairs instead of cpu pointers. Specifically: @@ -550,7 +606,7 @@ Here, "offset" means byte offset within the given page. You should call dma_mapping_error() as dma_map_page() could fail and return error as outlined under the dma_map_single() discussion. -You should call dma_unmap_page when the DMA activity is finished, e.g. +You should call dma_unmap_page() when the DMA activity is finished, e.g., from the interrupt which told you that the DMA transfer is done. With scatterlists, you map a region gathered from several regions by: @@ -588,18 +644,16 @@ PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be it should _NOT_ be the 'count' value _returned_ from the dma_map_sg call. -Every dma_map_{single,sg} call should have its dma_unmap_{single,sg} -counterpart, because the bus address space is a shared resource (although -in some ports the mapping is per each BUS so less devices contend for the -same bus address space) and you could render the machine unusable by eating -all bus addresses. +Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}() +counterpart, because the bus address space is a shared resource and +you could render the machine unusable by consuming all bus addresses. If you need to use the same streaming DMA region multiple times and touch the data in between the DMA transfers, the buffer needs to be synced -properly in order for the cpu and device to see the most uptodate and +properly in order for the cpu and device to see the most up-to-date and correct copy of the DMA buffer. -So, firstly, just map it with dma_map_{single,sg}, and after each DMA +So, firstly, just map it with dma_map_{single,sg}(), and after each DMA transfer call either: dma_sync_single_for_cpu(dev, dma_handle, size, direction); @@ -623,9 +677,9 @@ or: as appropriate. After the last DMA transfer call one of the DMA unmap routines -dma_unmap_{single,sg}. If you don't touch the data from the first dma_map_* -call till dma_unmap_*, then you don't have to call the dma_sync_* -routines at all. +dma_unmap_{single,sg}(). If you don't touch the data from the first +dma_map_*() call till dma_unmap_*(), then you don't have to call the +dma_sync_*() routines at all. Here is pseudo code which shows a situation in which you would need to use the dma_sync_*() interfaces. @@ -690,12 +744,12 @@ to use the dma_sync_*() interfaces. } } -Drivers converted fully to this interface should not use virt_to_bus any -longer, nor should they use bus_to_virt. Some drivers have to be changed a -little bit, because there is no longer an equivalent to bus_to_virt in the +Drivers converted fully to this interface should not use virt_to_bus() any +longer, nor should they use bus_to_virt(). Some drivers have to be changed a +little bit, because there is no longer an equivalent to bus_to_virt() in the dynamic DMA mapping scheme - you have to always store the DMA addresses -returned by the dma_alloc_coherent, dma_pool_alloc, and dma_map_single -calls (dma_map_sg stores them in the scatterlist itself if the platform +returned by the dma_alloc_coherent(), dma_pool_alloc(), and dma_map_single() +calls (dma_map_sg() stores them in the scatterlist itself if the platform supports dynamic DMA mapping in hardware) in your driver structures and/or in the card registers. @@ -709,9 +763,9 @@ as it is impossible to correctly support them. 
DMA address space is limited on some architectures and an allocation failure can be determined by: -- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0 +- checking if dma_alloc_coherent() returns NULL or dma_map_sg returns 0 -- checking the returned dma_addr_t of dma_map_single and dma_map_page +- checking the dma_addr_t returned from dma_map_single() and dma_map_page() by using dma_mapping_error(): dma_addr_t dma_handle; @@ -794,7 +848,7 @@ Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when dma_unmap_single(array[i].dma_addr); } -Networking drivers must call dev_kfree_skb to free the socket buffer +Networking drivers must call dev_kfree_skb() to free the socket buffer and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook (ndo_start_xmit). This means that the socket buffer is just dropped in the failure case. @@ -831,7 +885,7 @@ transform some example code. DEFINE_DMA_UNMAP_LEN(len); }; -2) Use dma_unmap_{addr,len}_set to set these values. +2) Use dma_unmap_{addr,len}_set() to set these values. Example, before: ringp->mapping = FOO; @@ -842,7 +896,7 @@ transform some example code. dma_unmap_addr_set(ringp, mapping, FOO); dma_unmap_len_set(ringp, len, BAR); -3) Use dma_unmap_{addr,len} to access these values. +3) Use dma_unmap_{addr,len}() to access these values. Example, before: dma_unmap_single(dev, ringp->mapping, ringp->len, diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index e865279cec58..1147eba43128 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -4,22 +4,26 @@ James E.J. Bottomley This document describes the DMA API. For a more gentle introduction -of the API (and actual examples) see -Documentation/DMA-API-HOWTO.txt. +of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt. -This API is split into two pieces. Part I describes the API. Part II -describes the extensions to the API for supporting non-consistent -memory machines. Unless you know that your driver absolutely has to -support non-consistent platforms (this is usually only legacy -platforms) you should only use the API described in part I. +This API is split into two pieces. Part I describes the basic API. +Part II describes extensions for supporting non-consistent memory +machines. Unless you know that your driver absolutely has to support +non-consistent platforms (this is usually only legacy platforms) you +should only use the API described in part I. Part I - dma_ API ------------------------------------- -To get the dma_ API, you must #include +To get the dma_ API, you must #include . This +provides dma_addr_t and the interfaces described below. +A dma_addr_t can hold any valid DMA or bus address for the platform. It +can be given to a device to use as a DMA source or target. A cpu cannot +reference a dma_addr_t directly because there may be translation between +its physical address space and the bus address space. -Part Ia - Using large dma-coherent buffers +Part Ia - Using large DMA-coherent buffers ------------------------------------------ void * @@ -33,20 +37,21 @@ to make sure to flush the processor's write buffers before telling devices to read that memory.) This routine allocates a region of bytes of consistent memory. -It also returns a which may be cast to an unsigned -integer the same width as the bus and used as the physical address -base of the region. 
-Returns: a pointer to the allocated region (in the processor's virtual +It returns a pointer to the allocated region (in the processor's virtual address space) or NULL if the allocation failed. +It also returns a which may be cast to an unsigned integer the +same width as the bus and given to the device as the bus address base of +the region. + Note: consistent memory can be expensive on some platforms, and the minimum allocation length may be as big as a page, so you should consolidate your requests for consistent memory as much as possible. The simplest way to do that is to use the dma_pool calls (see below). -The flag parameter (dma_alloc_coherent only) allows the caller to -specify the GFP_ flags (see kmalloc) for the allocation (the +The flag parameter (dma_alloc_coherent() only) allows the caller to +specify the GFP_ flags (see kmalloc()) for the allocation (the implementation may choose to ignore flags that affect the location of the returned memory, like GFP_DMA). @@ -61,24 +66,24 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) -Free the region of consistent memory you previously allocated. dev, -size and dma_handle must all be the same as those passed into the -consistent allocate. cpu_addr must be the virtual address returned by -the consistent allocate. +Free a region of consistent memory you previously allocated. dev, +size and dma_handle must all be the same as those passed into +dma_alloc_coherent(). cpu_addr must be the virtual address returned by +the dma_alloc_coherent(). Note that unlike their sibling allocation calls, these routines may only be called with IRQs enabled. -Part Ib - Using small dma-coherent buffers +Part Ib - Using small DMA-coherent buffers ------------------------------------------ To get this part of the dma_ API, you must #include -Many drivers need lots of small dma-coherent memory regions for DMA +Many drivers need lots of small DMA-coherent memory regions for DMA descriptors or I/O buffers. Rather than allocating in units of a page or more using dma_alloc_coherent(), you can use DMA pools. These work -much like a struct kmem_cache, except that they use the dma-coherent allocator, +much like a struct kmem_cache, except that they use the DMA-coherent allocator, not __get_free_pages(). Also, they understand common hardware constraints for alignment, like queue heads needing to be aligned on N-byte boundaries. @@ -87,7 +92,7 @@ for alignment, like queue heads needing to be aligned on N-byte boundaries. dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t alloc); -The pool create() routines initialize a pool of dma-coherent buffers +dma_pool_create() initializes a pool of DMA-coherent buffers for use with a given device. It must be called in a context which can sleep. @@ -102,25 +107,26 @@ from this pool must not cross 4KByte boundaries. void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, dma_addr_t *dma_handle); -This allocates memory from the pool; the returned memory will meet the size -and alignment requirements specified at creation time. Pass GFP_ATOMIC to -prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks), -pass GFP_KERNEL to allow blocking. Like dma_alloc_coherent(), this returns -two values: an address usable by the cpu, and the dma address usable by the -pool's device. +This allocates memory from the pool; the returned memory will meet the +size and alignment requirements specified at creation time. 
Pass +GFP_ATOMIC to prevent blocking, or if it's permitted (not +in_interrupt, not holding SMP locks), pass GFP_KERNEL to allow +blocking. Like dma_alloc_coherent(), this returns two values: an +address usable by the cpu, and the DMA address usable by the pool's +device. void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); This puts memory back into the pool. The pool is what was passed to -the pool allocation routine; the cpu (vaddr) and dma addresses are what +dma_pool_alloc(); the cpu (vaddr) and DMA addresses are what were returned when that routine allocated the memory being freed. void dma_pool_destroy(struct dma_pool *pool); -The pool destroy() routines free the resources of the pool. They must be +dma_pool_destroy() frees the resources of the pool. It must be called in a context which can sleep. Make sure you've freed all allocated memory back to the pool before you destroy it. @@ -187,9 +193,9 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) Maps a piece of processor virtual memory so it can be accessed by the -device and returns the physical handle of the memory. +device and returns the bus address of the memory. -The direction for both api's may be converted freely by casting. +The direction for both APIs may be converted freely by casting. However the dma_ API uses a strongly typed enumerator for its direction: @@ -198,31 +204,30 @@ DMA_TO_DEVICE data is going from the memory to the device DMA_FROM_DEVICE data is coming from the device to the memory DMA_BIDIRECTIONAL direction isn't known -Notes: Not all memory regions in a machine can be mapped by this -API. Further, regions that appear to be physically contiguous in -kernel virtual space may not be contiguous as physical memory. Since -this API does not provide any scatter/gather capability, it will fail -if the user tries to map a non-physically contiguous piece of memory. -For this reason, it is recommended that memory mapped by this API be -obtained only from sources which guarantee it to be physically contiguous -(like kmalloc). - -Further, the physical address of the memory must be within the -dma_mask of the device (the dma_mask represents a bit mask of the -addressable region for the device. I.e., if the physical address of -the memory anded with the dma_mask is still equal to the physical -address, then the device can perform DMA to the memory). In order to +Notes: Not all memory regions in a machine can be mapped by this API. +Further, contiguous kernel virtual space may not be contiguous as +physical memory. Since this API does not provide any scatter/gather +capability, it will fail if the user tries to map a non-physically +contiguous piece of memory. For this reason, memory to be mapped by +this API should be obtained from sources which guarantee it to be +physically contiguous (like kmalloc). + +Further, the bus address of the memory must be within the +dma_mask of the device (the dma_mask is a bit mask of the +addressable region for the device, i.e., if the bus address of +the memory ANDed with the dma_mask is still equal to the bus +address, then the device can perform DMA to the memory). To ensure that the memory allocated by kmalloc is within the dma_mask, the driver may specify various platform-dependent flags to restrict -the physical memory range of the allocation (e.g. 
on x86, GFP_DMA -guarantees to be within the first 16Mb of available physical memory, +the bus address range of the allocation (e.g., on x86, GFP_DMA +guarantees to be within the first 16MB of available bus addresses, as required by ISA devices). Note also that the above constraints on physical contiguity and dma_mask may not apply if the platform has an IOMMU (a device which -supplies a physical to virtual mapping between the I/O memory bus and -the device). However, to be portable, device driver writers may *not* -assume that such an IOMMU exists. +maps an I/O bus address to a physical memory address). However, to be +portable, device driver writers may *not* assume that such an IOMMU +exists. Warnings: Memory coherency operates at a granularity called the cache line width. In order for memory mapped by this API to operate @@ -281,9 +286,9 @@ cache width is. int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -In some circumstances dma_map_single and dma_map_page will fail to create +In some circumstances dma_map_single() and dma_map_page() will fail to create a mapping. A driver can check for these errors by testing the returned -dma address with dma_mapping_error(). A non-zero return value means the mapping +DMA address with dma_mapping_error(). A non-zero return value means the mapping could not be created and the driver should take appropriate action (e.g. reduce current DMA mapping usage or delay and try again later). @@ -291,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later). dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) -Returns: the number of physical segments mapped (this may be shorter +Returns: the number of bus address segments mapped (this may be shorter than passed in if some elements of the scatter/gather list are physically or virtually adjacent and an IOMMU maps them with a single entry). @@ -299,7 +304,7 @@ entry). Please note that the sg cannot be mapped again if it has been mapped once. The mapping process is allowed to destroy information in the sg. -As with the other mapping interfaces, dma_map_sg can fail. When it +As with the other mapping interfaces, dma_map_sg() can fail. When it does, 0 is returned and a driver must take appropriate action. It is critical that the driver do something, in the case of a block driver aborting the request or even oopsing is better than doing nothing and @@ -335,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping API. Note: must be the number you passed in, *not* the number of -physical entries returned. +bus address entries returned. void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, @@ -391,10 +396,10 @@ The four functions above are just like the counterpart functions without the _attrs suffixes, except that they pass an optional struct dma_attrs*. -struct dma_attrs encapsulates a set of "dma attributes". For the +struct dma_attrs encapsulates a set of "DMA attributes". For the definition of struct dma_attrs see linux/dma-attrs.h. -The interpretation of dma attributes is architecture-specific, and +The interpretation of DMA attributes is architecture-specific, and each attribute should be documented in Documentation/DMA-attributes.txt. If struct dma_attrs* is NULL, the semantics of each of these @@ -458,7 +463,7 @@ Note: where the platform can return consistent memory, it will guarantee that the sync points become nops. Warning: Handling non-consistent memory is a real pain. 
You should -only ever use this API if you positively know your driver will be +only use this API if you positively know your driver will be required to work on one of the rare (usually non-PCI) architectures that simply cannot make consistent memory. @@ -496,26 +501,26 @@ dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags) -Declare region of memory to be handed out by dma_alloc_coherent when +Declare region of memory to be handed out by dma_alloc_coherent() when it's asked for coherent memory for this device. bus_addr is the physical address to which the memory is currently assigned in the bus responding region (this will be used by the platform to perform the mapping). -device_addr is the physical address the device needs to be programmed +device_addr is the bus address the device needs to be programmed with actually to address this memory (this will be handed out as the dma_addr_t in dma_alloc_coherent()). size is the size of the area (must be multiples of PAGE_SIZE). -flags can be or'd together and are: +flags can be ORed together and are: DMA_MEMORY_MAP - request that the memory returned from dma_alloc_coherent() be directly writable. DMA_MEMORY_IO - request that the memory returned from -dma_alloc_coherent() be addressable using read/write/memcpy_toio etc. +dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc. One or both of these flags must be present. @@ -572,7 +577,7 @@ region is occupied. Part III - Debug drivers use of the DMA-API ------------------------------------------- -The DMA-API as described above as some constraints. DMA addresses must be +The DMA-API as described above has some constraints. DMA addresses must be released with the corresponding function with the same size for example. With the advent of hardware IOMMUs it becomes more and more important that drivers do not violate those constraints. In the worst case such a violation can @@ -690,11 +695,11 @@ architectural default. void debug_dmap_mapping_error(struct device *dev, dma_addr_t dma_addr); dma-debug interface debug_dma_mapping_error() to debug drivers that fail -to check dma mapping errors on addresses returned by dma_map_single() and +to check DMA mapping errors on addresses returned by dma_map_single() and dma_map_page() interfaces. This interface clears a flag set by debug_dma_map_page() to indicate that dma_mapping_error() has been called by the driver. When driver does unmap, debug_dma_unmap() checks the flag and if this flag is still set, prints warning message that includes call trace that leads up to the unmap. This interface can be called from dma_mapping_error() -routines to enable dma mapping error check debugging. +routines to enable DMA mapping error check debugging. diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt index e767805b4182..b1a19835e907 100644 --- a/Documentation/DMA-ISA-LPC.txt +++ b/Documentation/DMA-ISA-LPC.txt @@ -16,7 +16,7 @@ To do ISA style DMA you need to include two headers: #include The first is the generic DMA API used to convert virtual addresses to -physical addresses (see Documentation/DMA-API.txt for details). +bus addresses (see Documentation/DMA-API.txt for details). The second contains the routines specific to ISA DMA transfers. Since this is not present on all platforms make sure you construct your @@ -50,7 +50,7 @@ early as possible and not release it until the driver is unloaded.) 
Part III - Address translation ------------------------------ -To translate the virtual address to a physical use the normal DMA +To translate the virtual address to a bus address, use the normal DMA API. Do _not_ use isa_virt_to_phys() even though it does the same thing. The reason for this is that the function isa_virt_to_phys() will require a Kconfig dependency to ISA, not just ISA_DMA_API which diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index fd4aee29ad10..b9aa2b97aab5 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -8,6 +8,12 @@ #include #include +/* + * A dma_addr_t can hold any valid DMA or bus address for the platform. + * It can be given to a device to use as a DMA source or target. A CPU cannot + * reference a dma_addr_t directly because there may be translation between + * its physical address space and the bus address space. + */ struct dma_map_ops { void* (*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, diff --git a/include/linux/types.h b/include/linux/types.h index 4d118ba11349..a0bb7048687f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -142,6 +142,7 @@ typedef unsigned long blkcnt_t; #define pgoff_t unsigned long #endif +/* A dma_addr_t can hold any valid DMA or bus address for the platform */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT typedef u64 dma_addr_t; #else -- cgit v1.2.3 From 88a984ba0795f14a3847edbd7fabe652289ea89b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 20 May 2014 16:54:22 -0600 Subject: DMA-API: Change dma_declare_coherent_memory() CPU address to phys_addr_t dma_declare_coherent_memory() takes two addresses for a region of memory: a "bus_addr" and a "device_addr". I think the intent is that "bus_addr" is the physical address a *CPU* would use to access the region, and "device_addr" is the bus address the *device* would use to address the region. Rename "bus_addr" to "phys_addr" and change its type to phys_addr_t. Most callers already supply a phys_addr_t for this argument. The others supply a 32-bit integer (a constant, unsigned int, or __u32) and need no change. Use "unsigned long", not phys_addr_t, to hold PFNs. No functional change (this could theoretically fix a truncation in a config with 32-bit dma_addr_t and 64-bit phys_addr_t, but I don't think there are any such cases involving this code). Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann Acked-by: Greg Kroah-Hartman Acked-by: James Bottomley Acked-by: Randy Dunlap --- Documentation/DMA-API.txt | 9 ++++----- drivers/base/dma-coherent.c | 10 +++++----- drivers/base/dma-mapping.c | 6 +++--- include/asm-generic/dma-coherent.h | 13 +++++-------- include/linux/dma-mapping.h | 7 ++++--- 5 files changed, 21 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 1147eba43128..4f1cdc5febd1 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -497,19 +497,18 @@ continuing on for size. Again, you *must* observe the cache line boundaries when doing this. int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) Declare region of memory to be handed out by dma_alloc_coherent() when it's asked for coherent memory for this device. 
-bus_addr is the physical address to which the memory is currently -assigned in the bus responding region (this will be used by the -platform to perform the mapping). +phys_addr is the cpu physical address to which the memory is currently +assigned (this will be ioremapped so the cpu can access the region). device_addr is the bus address the device needs to be programmed -with actually to address this memory (this will be handed out as the +with to actually address this memory (this will be handed out as the dma_addr_t in dma_alloc_coherent()). size is the size of the area (must be multiples of PAGE_SIZE). diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b641027..7d6e84a51424 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,13 +10,13 @@ struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; - phys_addr_t pfn_base; + unsigned long pfn_base; int size; int flags; unsigned long *bitmap; }; -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - mem_base = ioremap(bus_addr, size); + mem_base = ioremap(phys_addr, size); if (!mem_base) goto out; @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; - dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); + dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); dev->dma_mem->size = pages; dev->dma_mem->flags = flags; @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, *ret = -ENXIO; if (off < count && user_count <= count - off) { - unsigned pfn = mem->pfn_base + start + off; + unsigned long pfn = mem->pfn_base + start + off; *ret = remap_pfn_range(vma, vma->vm_start, pfn, user_count << PAGE_SHIFT, vma->vm_page_prot); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c2..6cd08e145bfa 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) /** * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() * @dev: Device to declare coherent memory for - * @bus_addr: Bus address of coherent memory to be declared + * @phys_addr: Physical address of coherent memory to be declared * @device_addr: Device address of coherent memory to be declared * @size: Size of coherent memory to be declared * @flags: Flags @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) * RETURNS: * 0 on success, -errno on failure. 
*/ -int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void *res; @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, if (!res) return -ENOMEM; - rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, + rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); if (rc == 0) devres_add(dev, res); diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h index 2be8a2dbc868..0297e5875798 100644 --- a/include/asm-generic/dma-coherent.h +++ b/include/asm-generic/dma-coherent.h @@ -16,16 +16,13 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, * Standard interface */ #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); -extern void -dma_release_declared_memory(struct device *dev); +void dma_release_declared_memory(struct device *dev); -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); #else #define dma_alloc_from_coherent(dev, size, handle, ret) (0) #define dma_release_from_coherent(dev, order, vaddr) (0) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index b9aa2b97aab5..0c3eab1e39ac 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -192,7 +192,7 @@ static inline int dma_get_cache_alignment(void) #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY static inline int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { return 0; @@ -223,13 +223,14 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +extern int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags); extern void dmam_release_declared_memory(struct device *dev); #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ static inline int dmam_declare_coherent_memory(struct device *dev, - dma_addr_t bus_addr, dma_addr_t device_addr, + phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, gfp_t gfp) { return 0; -- cgit v1.2.3 From f0ba3d05c9c647ab42ed6a0dbdfdeae42bfbd6de Mon Sep 17 00:00:00 2001 From: Eyal Perry Date: Tue, 20 May 2014 17:57:00 +0300 Subject: genirq: Provide !SMP stub for irq_set_affinity_notifier() Instead of requiring each consumer of the IRQ affinity notifier to have themselves be explicitly dependent on CONFIG_SMP, make the definition of struct irq_affinity_notify to exist independently of that config option and introduce a stub for irq_set_affinity_notifier() under non SMP configuration. 
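As a rough illustration (not part of this patch), a consumer can now register the
notifier unconditionally and let the !SMP stub turn the call into a harmless no-op;
the mydrv_* names below are invented for the sketch:

	#include <linux/interrupt.h>

	struct mydrv {
		struct irq_affinity_notify affinity_notify;
		/* ... */
	};

	static void mydrv_affinity_notify(struct irq_affinity_notify *notify,
					  const cpumask_t *mask)
	{
		struct mydrv *drv = container_of(notify, struct mydrv,
						 affinity_notify);
		/* re-target rings/queues to the new CPU mask */
	}

	static void mydrv_affinity_release(struct kref *ref)
	{
		/* notifier is embedded in struct mydrv; nothing to free here */
	}

	static int mydrv_setup_irq(struct mydrv *drv, unsigned int irq)
	{
		drv->affinity_notify.notify = mydrv_affinity_notify;
		drv->affinity_notify.release = mydrv_affinity_release;

		/* returns 0 on !SMP kernels thanks to the stub added below */
		return irq_set_affinity_notifier(irq, &drv->affinity_notify);
	}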
Fixes: 2eacc23 ("net/mlx4_core: Enforce irq affinity changes immediatly") Signed-off-by: Eyal Perry Signed-off-by: Amir Vadai Cc: Ben Hutchings Cc: Yevgeny Petrilin Cc: Or Gerlitz Cc: David S. Miller Link: http://lkml.kernel.org/r/1400597820-30685-1-git-send-email-amirv@mellanox.com Signed-off-by: Thomas Gleixner --- include/linux/interrupt.h | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 97ac926c78a7..3f74c0593171 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -199,6 +199,26 @@ extern int check_wakeup_irqs(void); static inline int check_wakeup_irqs(void) { return 0; } #endif +/** + * struct irq_affinity_notify - context for notification of IRQ affinity changes + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use + * @work: Work item, for internal use + * @notify: Function to be called on change. This will be + * called in process context. + * @release: Function to be called on release. This will be + * called in process context. Once registered, the + * structure must only be freed when this function is + * called or later. + */ +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); +}; + #if defined(CONFIG_SMP) extern cpumask_var_t irq_default_affinity; @@ -242,26 +262,6 @@ extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); -/** - * struct irq_affinity_notify - context for notification of IRQ affinity changes - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use - * @work: Work item, for internal use - * @notify: Function to be called on change. This will be - * called in process context. - * @release: Function to be called on release. This will be - * called in process context. Once registered, the - * structure must only be freed when this function is - * called or later. - */ -struct irq_affinity_notify { - unsigned int irq; - struct kref kref; - struct work_struct work; - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); -}; - extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); @@ -284,6 +284,12 @@ static inline int irq_set_affinity_hint(unsigned int irq, { return -EINVAL; } + +static inline int +irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +{ + return 0; +} #endif /* CONFIG_SMP */ /* -- cgit v1.2.3 From eed542d6962ba33a689b4007a389f466e407bd74 Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Tue, 20 May 2014 20:31:04 +0900 Subject: ftrace: Make CALLER_ADDRx macros more generic Most archs with HAVE_ARCH_CALLER_ADDR have pretty much the same definitions of CALLER_ADDRx(n). Instead of duplicating the code for all the archs, define a ftrace_return_address0() and ftrace_return_address(n) that can be overwritten by the archs if they need to do something different. Instead of 7 macros in every arch, we now only have at most 2 (and actually only 1 as ftrace_return_address0() should be the same for all archs). The CALLER_ADDRx(n) will now be defined in linux/ftrace.h and use the ftrace_return_address*(n?) macros. This removes a lot of the duplicate code. 
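As a hedged sketch (not part of this patch), the entire per-arch burden after this
change is a one-line override in the architecture's asm/ftrace.h; the fragment below
is for a hypothetical architecture that already provides a return_address() walker:

	/* hypothetical arch/<arch>/include/asm/ftrace.h fragment */
	#ifndef __ASSEMBLY__
	extern void *return_address(unsigned int level);	/* arch stack walker */

	/* CALLER_ADDR1..CALLER_ADDR6 in linux/ftrace.h now expand to this */
	#define ftrace_return_address(n) return_address(n)
	#endif /* __ASSEMBLY__ */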
Link: http://lkml.kernel.org/p/1400585464-30333-1-git-send-email-takahiro.akashi@linaro.org Signed-off-by: AKASHI Takahiro Signed-off-by: Steven Rostedt --- arch/arm/include/asm/ftrace.h | 10 +--------- arch/blackfin/include/asm/ftrace.h | 11 +---------- arch/parisc/include/asm/ftrace.h | 10 +--------- arch/sh/include/asm/ftrace.h | 10 +--------- arch/xtensa/include/asm/ftrace.h | 14 ++++---------- include/linux/ftrace.h | 34 ++++++++++++++++++---------------- 6 files changed, 26 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index f89515adac60..eb577f4f5f70 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level) #endif -#define HAVE_ARCH_CALLER_ADDR - -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 ((unsigned long)return_address(1)) -#define CALLER_ADDR2 ((unsigned long)return_address(2)) -#define CALLER_ADDR3 ((unsigned long)return_address(3)) -#define CALLER_ADDR4 ((unsigned long)return_address(4)) -#define CALLER_ADDR5 ((unsigned long)return_address(5)) -#define CALLER_ADDR6 ((unsigned long)return_address(6)) +#define ftrace_return_addr(n) return_address(n) #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h index 8a029505d7b7..2f1c3c2657ad 100644 --- a/arch/blackfin/include/asm/ftrace.h +++ b/arch/blackfin/include/asm/ftrace.h @@ -66,16 +66,7 @@ extern inline void *return_address(unsigned int level) #endif /* CONFIG_FRAME_POINTER */ -#define HAVE_ARCH_CALLER_ADDR - -/* inline function or macro may lead to unexpected result */ -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 ((unsigned long)return_address(1)) -#define CALLER_ADDR2 ((unsigned long)return_address(2)) -#define CALLER_ADDR3 ((unsigned long)return_address(3)) -#define CALLER_ADDR4 ((unsigned long)return_address(4)) -#define CALLER_ADDR5 ((unsigned long)return_address(5)) -#define CALLER_ADDR6 ((unsigned long)return_address(6)) +#define ftrace_return_address(n) return_address(n) #endif /* __ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h index 72c0fafaa039..544ed8ef87eb 100644 --- a/arch/parisc/include/asm/ftrace.h +++ b/arch/parisc/include/asm/ftrace.h @@ -24,15 +24,7 @@ extern void return_to_handler(void); extern unsigned long return_address(unsigned int); -#define HAVE_ARCH_CALLER_ADDR - -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 return_address(1) -#define CALLER_ADDR2 return_address(2) -#define CALLER_ADDR3 return_address(3) -#define CALLER_ADDR4 return_address(4) -#define CALLER_ADDR5 return_address(5) -#define CALLER_ADDR6 return_address(6) +#define ftrace_return_address(n) return_address(n) #endif /* __ASSEMBLY__ */ diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 13e9966464c2..e79fb6ebaa42 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -40,15 +40,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) /* arch/sh/kernel/return_address.c */ extern void *return_address(unsigned int); -#define HAVE_ARCH_CALLER_ADDR - -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 ((unsigned long)return_address(1)) -#define CALLER_ADDR2 ((unsigned long)return_address(2)) -#define CALLER_ADDR3 ((unsigned 
long)return_address(3)) -#define CALLER_ADDR4 ((unsigned long)return_address(4)) -#define CALLER_ADDR5 ((unsigned long)return_address(5)) -#define CALLER_ADDR6 ((unsigned long)return_address(6)) +#define ftrace_return_address(n) return_address(n) #endif /* __ASSEMBLY__ */ diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 736b9d214d80..6c6d9a9f185f 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h @@ -12,24 +12,18 @@ #include -#define HAVE_ARCH_CALLER_ADDR #ifndef __ASSEMBLY__ -#define CALLER_ADDR0 ({ unsigned long a0, a1; \ +#define ftrace_return_address0 ({ unsigned long a0, a1; \ __asm__ __volatile__ ( \ "mov %0, a0\n" \ "mov %1, a1\n" \ : "=r"(a0), "=r"(a1)); \ MAKE_PC_FROM_RA(a0, a1); }) + #ifdef CONFIG_FRAME_POINTER extern unsigned long return_address(unsigned level); -#define CALLER_ADDR1 return_address(1) -#define CALLER_ADDR2 return_address(2) -#define CALLER_ADDR3 return_address(3) -#else /* CONFIG_FRAME_POINTER */ -#define CALLER_ADDR1 (0) -#define CALLER_ADDR2 (0) -#define CALLER_ADDR3 (0) -#endif /* CONFIG_FRAME_POINTER */ +#define ftrace_return_address(n) return_address(n) +#endif #endif /* __ASSEMBLY__ */ #ifdef CONFIG_FUNCTION_TRACER diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ae9504b4b67d..2018751cad9e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -616,25 +616,27 @@ static inline void __ftrace_enabled_restore(int enabled) #endif } -#ifndef HAVE_ARCH_CALLER_ADDR +/* All archs should have this, but we define it for consistency */ +#ifndef ftrace_return_address0 +# define ftrace_return_address0 __builtin_return_address(0) +#endif + +/* Archs may use other ways for ADDR1 and beyond */ +#ifndef ftrace_return_address # ifdef CONFIG_FRAME_POINTER -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) -# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) -# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) -# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) -# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) -# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) +# define ftrace_return_address(n) __builtin_return_address(n) # else -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 0UL -# define CALLER_ADDR2 0UL -# define CALLER_ADDR3 0UL -# define CALLER_ADDR4 0UL -# define CALLER_ADDR5 0UL -# define CALLER_ADDR6 0UL +# define ftrace_return_address(n) 0UL # endif -#endif /* ifndef HAVE_ARCH_CALLER_ADDR */ +#endif + +#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0) +#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1)) +#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2)) +#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3)) +#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4)) +#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) +#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) #ifdef CONFIG_IRQSOFF_TRACER extern void time_hardirqs_on(unsigned long a0, unsigned long a1); -- cgit v1.2.3 From f01d907582f8461546379aa415a7c6d5cfb8e5fd Mon Sep 17 00:00:00 2001 From: Alexandre Courbot Date: Sat, 17 May 2014 14:54:50 +0900 Subject: gpio: make of_get_named_gpiod_flags() private of_get_named_gpiod_flags() is visible and directly usable by GPIO consumers, but it really should not as the gpiod interface relies 
on the simpler gpiod_get() to provide properly-configured GPIOs. of_get_named_gpiod_flags() is just used internally by gpiolib to implement gpiod_get(), and by the old of_get_named_gpio_flags() function, therefore it makes sense to make it gpiolib-private. As a side-effect, the unused (and unneeded) of_get_gpiod_flags() inline function is also removed, and of_get_named_gpio_flags() is moved from a static inline function to a regular one in gpiolib-of.c This results in all references to gpiod_* functions in of_gpio.h being gone, which is the way it should be since this file is part of the old integer GPIO interface. Changes since v1: - Fixed compilation error when CONFIG_OF_GPIO is not defined - Fixed warning due to of_gpio_flags enum not being declared in private gpiolib.h header Signed-off-by: Alexandre Courbot Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-of.c | 14 ++++++++++++++ drivers/gpio/gpiolib.h | 5 +++++ include/linux/of_gpio.h | 35 +++-------------------------------- 3 files changed, 22 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index db98d3a12f70..af7e25c9a9ae 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -96,6 +96,20 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, } EXPORT_SYMBOL(of_get_named_gpiod_flags); +int of_get_named_gpio_flags(struct device_node *np, const char *list_name, + int index, enum of_gpio_flags *flags) +{ + struct gpio_desc *desc; + + desc = of_get_named_gpiod_flags(np, list_name, index, flags); + + if (IS_ERR(desc)) + return PTR_ERR(desc); + else + return desc_to_gpio(desc); +} +EXPORT_SYMBOL(of_get_named_gpio_flags); + /** * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags * @gc: pointer to the gpio_chip structure diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index cf092941a9fd..1a4103dd38df 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,6 +15,8 @@ #include #include +enum of_gpio_flags; + /** * struct acpi_gpio_info - ACPI GPIO specific information * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo @@ -46,4 +48,7 @@ acpi_get_gpiod_by_index(struct device *dev, int index, int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); +struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + #endif /* GPIOLIB_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f14123a5a9df..38fc05036015 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -19,7 +19,6 @@ #include #include #include -#include struct device_node; @@ -48,7 +47,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, +extern int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); extern int of_mm_gpiochip_add(struct device_node *np, @@ -63,10 +62,10 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc, #else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. 
*/ -static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, +static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { - return ERR_PTR(-ENOSYS); + return -ENOSYS; } static inline int of_gpio_simple_xlate(struct gpio_chip *gc, @@ -81,18 +80,6 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { } #endif /* CONFIG_OF_GPIO */ -static inline int of_get_named_gpio_flags(struct device_node *np, - const char *list_name, int index, enum of_gpio_flags *flags) -{ - struct gpio_desc *desc; - desc = of_get_named_gpiod_flags(np, list_name, index, flags); - - if (IS_ERR(desc)) - return PTR_ERR(desc); - else - return desc_to_gpio(desc); -} - /** * of_gpio_named_count() - Count GPIOs for a device * @np: device node to count GPIOs for @@ -129,22 +116,6 @@ static inline int of_gpio_count(struct device_node *np) return of_gpio_named_count(np, "gpios"); } -/** - * of_get_gpiod_flags() - Get a GPIO descriptor and flags to use with GPIO API - * @np: device node to get GPIO from - * @index: index of the GPIO - * @flags: a flags pointer to fill in - * - * Returns GPIO descriptor to use with Linux generic GPIO API, or a errno - * value on the error condition. If @flags is not NULL the function also fills - * in flags for the GPIO. - */ -static inline struct gpio_desc *of_get_gpiod_flags(struct device_node *np, - int index, enum of_gpio_flags *flags) -{ - return of_get_named_gpiod_flags(np, "gpios", index, flags); -} - static inline int of_get_gpio_flags(struct device_node *np, int index, enum of_gpio_flags *flags) { -- cgit v1.2.3 From 9e1e726311830bc5b8b568d5178f6a52c357fb6e Mon Sep 17 00:00:00 2001 From: Matt Porter Date: Wed, 23 Apr 2014 19:21:31 -0400 Subject: mfd: bcm590xx: Add support for secondary I2C slave address BCM590xx utilizes a secondary I2C slave address to access additional register space. Add support for the secondary address space by instantiating a dummy I2C device with the appropriate secondary I2C slave address. Also expose a secondary regmap register space so that MFD drivers can access this secondary i2c slave address space. 
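As an illustrative sketch (not part of this patch), a child MFD driver can fetch the
shared struct bcm590xx from its parent device and pick whichever regmap matches the
register it needs; MYCHILD_PRI_REG and MYCHILD_SEC_REG are made-up register names:

	#include <linux/mfd/bcm590xx.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static int mychild_probe(struct platform_device *pdev)
	{
		struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
		unsigned int val;
		int ret;

		/* registers behind the base slave address: primary regmap */
		ret = regmap_read(bcm590xx->regmap_pri, MYCHILD_PRI_REG, &val);
		if (ret)
			return ret;

		/* registers behind the A(2) slave address: secondary regmap */
		return regmap_write(bcm590xx->regmap_sec, MYCHILD_SEC_REG, 0x01);
	}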
Signed-off-by: Matt Porter Signed-off-by: Lee Jones --- drivers/mfd/bcm590xx.c | 60 +++++++++++++++++++++++++++++++++----------- include/linux/mfd/bcm590xx.h | 9 ++++--- 2 files changed, 52 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c index e9a33c79431b..43cba1a1973c 100644 --- a/drivers/mfd/bcm590xx.c +++ b/drivers/mfd/bcm590xx.c @@ -28,39 +28,71 @@ static const struct mfd_cell bcm590xx_devs[] = { }, }; -static const struct regmap_config bcm590xx_regmap_config = { +static const struct regmap_config bcm590xx_regmap_config_pri = { .reg_bits = 8, .val_bits = 8, - .max_register = BCM590XX_MAX_REGISTER, + .max_register = BCM590XX_MAX_REGISTER_PRI, .cache_type = REGCACHE_RBTREE, }; -static int bcm590xx_i2c_probe(struct i2c_client *i2c, +static const struct regmap_config bcm590xx_regmap_config_sec = { + .reg_bits = 8, + .val_bits = 8, + .max_register = BCM590XX_MAX_REGISTER_SEC, + .cache_type = REGCACHE_RBTREE, +}; + +static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri, const struct i2c_device_id *id) { struct bcm590xx *bcm590xx; int ret; - bcm590xx = devm_kzalloc(&i2c->dev, sizeof(*bcm590xx), GFP_KERNEL); + bcm590xx = devm_kzalloc(&i2c_pri->dev, sizeof(*bcm590xx), GFP_KERNEL); if (!bcm590xx) return -ENOMEM; - i2c_set_clientdata(i2c, bcm590xx); - bcm590xx->dev = &i2c->dev; - bcm590xx->i2c_client = i2c; + i2c_set_clientdata(i2c_pri, bcm590xx); + bcm590xx->dev = &i2c_pri->dev; + bcm590xx->i2c_pri = i2c_pri; - bcm590xx->regmap = devm_regmap_init_i2c(i2c, &bcm590xx_regmap_config); - if (IS_ERR(bcm590xx->regmap)) { - ret = PTR_ERR(bcm590xx->regmap); - dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret); + bcm590xx->regmap_pri = devm_regmap_init_i2c(i2c_pri, + &bcm590xx_regmap_config_pri); + if (IS_ERR(bcm590xx->regmap_pri)) { + ret = PTR_ERR(bcm590xx->regmap_pri); + dev_err(&i2c_pri->dev, "primary regmap init failed: %d\n", ret); return ret; } - ret = mfd_add_devices(&i2c->dev, -1, bcm590xx_devs, + /* Secondary I2C slave address is the base address with A(2) asserted */ + bcm590xx->i2c_sec = i2c_new_dummy(i2c_pri->adapter, + i2c_pri->addr | BIT(2)); + if (IS_ERR_OR_NULL(bcm590xx->i2c_sec)) { + dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n"); + return -ENODEV; + } + i2c_set_clientdata(bcm590xx->i2c_sec, bcm590xx); + + bcm590xx->regmap_sec = devm_regmap_init_i2c(bcm590xx->i2c_sec, + &bcm590xx_regmap_config_sec); + if (IS_ERR(bcm590xx->regmap_sec)) { + ret = PTR_ERR(bcm590xx->regmap_sec); + dev_err(&bcm590xx->i2c_sec->dev, + "secondary regmap init failed: %d\n", ret); + goto err; + } + + ret = mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs, ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL); - if (ret < 0) - dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret); + if (ret < 0) { + dev_err(&i2c_pri->dev, "failed to add sub-devices: %d\n", ret); + goto err; + } + + return 0; +err: + i2c_unregister_device(bcm590xx->i2c_sec); return ret; } diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h index 434df2d4e587..267aedee1c7a 100644 --- a/include/linux/mfd/bcm590xx.h +++ b/include/linux/mfd/bcm590xx.h @@ -19,12 +19,15 @@ #include /* max register address */ -#define BCM590XX_MAX_REGISTER 0xe7 +#define BCM590XX_MAX_REGISTER_PRI 0xe7 +#define BCM590XX_MAX_REGISTER_SEC 0xf0 struct bcm590xx { struct device *dev; - struct i2c_client *i2c_client; - struct regmap *regmap; + struct i2c_client *i2c_pri; + struct i2c_client *i2c_sec; + struct regmap *regmap_pri; + struct regmap *regmap_sec; 
unsigned int id; }; -- cgit v1.2.3 From e814e71ba4a6e1d7509b0f4b1928365ea650cace Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 21 May 2014 13:59:08 -0600 Subject: blk-mq: allow the hctx cpu hotplug notifier to return errors Prepare this for the next patch which adds more smarts in the plugging logic, so that we can save some memory. Signed-off-by: Jens Axboe --- block/blk-mq-cpu.c | 12 ++++++++---- block/blk-mq.c | 9 +++++---- block/blk-mq.h | 2 +- include/linux/blk-mq.h | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 136ef8643bba..d2c253f71b86 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, { unsigned int cpu = (unsigned long) hcpu; struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; raw_spin_lock(&blk_mq_cpu_notify_lock); - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) - notify->notify(notify->data, action, cpu); + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); + if (ret != NOTIFY_OK) + break; + } raw_spin_unlock(&blk_mq_cpu_notify_lock); - return NOTIFY_OK; + return ret; } void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) @@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) } void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data) { notifier->notify = fn; diff --git a/block/blk-mq.c b/block/blk-mq.c index ef7ed5e95d6d..5a3683fc5bdb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1196,8 +1196,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, } EXPORT_SYMBOL(blk_mq_free_single_hw_queue); -static void blk_mq_hctx_notify(void *data, unsigned long action, - unsigned int cpu) +static int blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; @@ -1205,7 +1205,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, LIST_HEAD(tmp); if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) - return; + return NOTIFY_OK; /* * Move ctx entries to new CPU, if this one is going away. 
@@ -1220,7 +1220,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, spin_unlock(&ctx->lock); if (list_empty(&tmp)) - return; + return NOTIFY_OK; ctx = blk_mq_get_ctx(q); spin_lock(&ctx->lock); @@ -1240,6 +1240,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_run_hw_queue(hctx, true); blk_mq_put_ctx(ctx); + return NOTIFY_OK; } static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, diff --git a/block/blk-mq.h b/block/blk-mq.h index 7db4fe4bd002..491dbd4e93f5 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -39,7 +39,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); */ struct blk_mq_cpu_notifier; void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data); void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f45424453338..4d2800567aad 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -8,7 +8,7 @@ struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list; void *data; - void (*notify)(void *data, unsigned long action, unsigned int cpu); + int (*notify)(void *data, unsigned long action, unsigned int cpu); }; struct blk_mq_ctxmap { -- cgit v1.2.3 From db885bf82883f9743efe09d91775c579c0ed6842 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 16 May 2014 15:17:12 +0300 Subject: ARM: edma: Remove queue_tc_mapping data from edma_soc_info It is no longer in use by the driver or board files. Signed-off-by: Peter Ujfalusi Signed-off-by: Sekhar Nori --- include/linux/platform_data/edma.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 12f134b1493c..633e196ebdf2 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -175,7 +175,6 @@ struct edma_soc_info { /* Resource reservation for other cores */ struct edma_rsv_info *rsv; - s8 (*queue_tc_mapping)[2]; s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; }; -- cgit v1.2.3 From ba391e5a5ac6697b8bcae8c0d01439cb765d9ef8 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Wed, 21 May 2014 11:15:56 -0400 Subject: HID: rmi: do not handle touchscreens through hid-rmi Currently, hid-rmi drives every Synaptics product, but the touchscreens on the Windows tablets should be handled through hid-multitouch. Instead of providing a long list of PIDs, rely on the scan_report capability to detect which should go to hid-multitouch, and which should not go to hid-rmi. 
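Roughly, and only as a sketch of the pattern rather than anything in this patch,
another vendor driver could reuse the same two-step scheme: claim a dedicated group
during report scanning, then match on that group instead of listing PIDs
(HID_GROUP_FOO, USB_VENDOR_ID_FOO and foo_id are invented names):

	/* step 1: in hid_scan_report(), route by vendor and report contents */
	if (hid->vendor == USB_VENDOR_ID_FOO &&
	    hid->group == HID_GROUP_GENERIC)
		hid->group = HID_GROUP_FOO;

	/* step 2: in the vendor driver, match the group on any bus */
	static const struct hid_device_id foo_id[] = {
		{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_FOO, HID_ANY_ID, HID_ANY_ID) },
		{ }
	};
	MODULE_DEVICE_TABLE(hid, foo_id);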
related bug: https://bugzilla.kernel.org/show_bug.cgi?id=74241 https://bugzilla.redhat.com/show_bug.cgi?id=1089583 Signed-off-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 10 ++++++++-- drivers/hid/hid-rmi.c | 3 +-- include/linux/hid.h | 8 ++++++++ 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f05255d92de7..64c71c866916 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -776,6 +776,14 @@ static int hid_scan_report(struct hid_device *hid) (hid->group == HID_GROUP_MULTITOUCH)) hid->group = HID_GROUP_MULTITOUCH_WIN_8; + /* + * Vendor specific handlings + */ + if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) && + (hid->group == HID_GROUP_GENERIC)) + /* hid-rmi should take care of them, not hid-generic */ + hid->group = HID_GROUP_RMI; + vfree(parser); return 0; } @@ -1882,8 +1890,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index c529b033ba9e..2451c7e5febd 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -894,8 +894,7 @@ static void rmi_remove(struct hid_device *hdev) } static const struct hid_device_id rmi_id[] = { - { HID_I2C_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, HID_ANY_ID) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, rmi_id); diff --git a/include/linux/hid.h b/include/linux/hid.h index 54f855b2c902..8ce9ff4d50af 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -299,12 +299,20 @@ struct hid_item { /* * HID device groups + * + * Note: HID_GROUP_ANY is declared in linux/mod_devicetable.h + * and has a value of 0x0000 */ #define HID_GROUP_GENERIC 0x0001 #define HID_GROUP_MULTITOUCH 0x0002 #define HID_GROUP_SENSOR_HUB 0x0003 #define HID_GROUP_MULTITOUCH_WIN_8 0x0004 +/* + * Vendor specific HID device groups + */ +#define HID_GROUP_RMI 0x0100 + /* * This is the global environment of the parser. This information is * persistent for main-items. 
The global environment can be saved and -- cgit v1.2.3 From 7aa2c016db2162defff77f6f5731bff3f25e5175 Mon Sep 17 00:00:00 2001 From: Dongsheng Yang Date: Thu, 8 May 2014 18:33:49 +0900 Subject: sched: Consolidate open coded implementations of nice level frobbing into nice_to_rlimit() and rlimit_to_nice() Signed-off-by: Dongsheng Yang Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/a568a1e3cc8e78648f41b5035fa5e381d36274da.1399532322.git.yangds.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- drivers/staging/android/binder.c | 2 +- include/linux/sched/prio.h | 16 ++++++++++++++++ kernel/sched/core.c | 2 +- kernel/sys.c | 6 +++--- 4 files changed, 21 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 179b21b66504..9311bb67ec35 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -436,7 +436,7 @@ static void binder_set_nice(long nice) set_user_nice(current, nice); return; } - min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; + min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); binder_debug(BINDER_DEBUG_PRIORITY_CAP, "%d: nice value %ld not allowed use %ld instead\n", current->pid, nice, min_nice); diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index ac322583c820..d9cf5a5762d9 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -41,4 +41,20 @@ #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) +/* + * Convert nice value [19,-20] to rlimit style value [1,40]. + */ +static inline long nice_to_rlimit(long nice) +{ + return (MAX_NICE - nice + 1); +} + +/* + * Convert rlimit style value [1,40] to nice value [-20, 19]. 
+ */ +static inline long rlimit_to_nice(long prio) +{ + return (MAX_NICE - prio + 1); +} + #endif /* _SCHED_PRIO_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index da302ca98f60..321d800e4baa 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3033,7 +3033,7 @@ EXPORT_SYMBOL(set_user_nice); int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ - int nice_rlim = 20 - nice; + int nice_rlim = nice_to_rlimit(nice); return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || capable(CAP_SYS_NICE)); diff --git a/kernel/sys.c b/kernel/sys.c index fba0f29401ea..66a751ebf9d9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -250,7 +250,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) else p = current; if (p) { - niceval = 20 - task_nice(p); + niceval = nice_to_rlimit(task_nice(p)); if (niceval > retval) retval = niceval; } @@ -261,7 +261,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) else pgrp = task_pgrp(current); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - niceval = 20 - task_nice(p); + niceval = nice_to_rlimit(task_nice(p)); if (niceval > retval) retval = niceval; } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); @@ -277,7 +277,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) do_each_thread(g, p) { if (uid_eq(task_uid(p), uid)) { - niceval = 20 - task_nice(p); + niceval = nice_to_rlimit(task_nice(p)); if (niceval > retval) retval = niceval; } -- cgit v1.2.3 From 4027d080854d1be96ef134a1c3024d5276114db6 Mon Sep 17 00:00:00 2001 From: "xiaofeng.yan" Date: Fri, 9 May 2014 03:21:27 +0000 Subject: sched/rt: Fix 'struct sched_dl_entity' and dl_task_time() comments, to match the current upstream code Signed-off-by: xiaofeng.yan Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1399605687-18094-1-git-send-email-xiaofeng.yan@huawei.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 ++-- kernel/sched/deadline.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 725eef121c9f..0f91d00efd87 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1175,8 +1175,8 @@ struct sched_dl_entity { /* * Original scheduling parameters. Copied here from sched_attr - * during sched_setscheduler2(), they will remain the same until - * the next sched_setscheduler2(). + * during sched_setattr(), they will remain the same until + * the next sched_setattr(). */ u64 dl_runtime; /* maximum runtime for each instance */ u64 dl_deadline; /* relative deadline of each instance */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e0a04ae1e0dd..f9ca7d19781a 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -520,7 +520,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * We need to take care of a possible races here. In fact, the * task might have changed its scheduling policy to something * different from SCHED_DEADLINE or changed its reservation - * parameters (through sched_setscheduler()). + * parameters (through sched_setattr()). 
*/ if (!dl_task(p) || dl_se->dl_new) goto unlock; -- cgit v1.2.3 From 903ed4913c7fe78d2746445564634264291c7493 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 16 May 2014 15:17:20 +0300 Subject: ARM: edma: Remove redundant/unused parameters from edma_soc_info The following parameters are no longer needed by the edma driver since the information can be obtained from the IP's CCCFG register: n_channel, n_region, n_slot and n_tc. Remove the n_cc as well since in this context it has no meaning: there is a separate edma_soc_info struct per eDMA3 CC instance, so this member does not make any sense (and the driver no longer uses it). Signed-off-by: Peter Ujfalusi Signed-off-by: Sekhar Nori --- include/linux/platform_data/edma.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 633e196ebdf2..eb8d5627d080 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -158,13 +158,6 @@ struct edma_soc_info { - - /* how many dma resources of each type */ - unsigned n_channel; - unsigned n_region; - unsigned n_slot; - unsigned n_tc; - unsigned n_cc; /* * Default queue is expected to be a low-priority queue. * This way, long transfers on the default queue started -- cgit v1.2.3 From 9edbcd2252b5ef148177c9f2c11a56469cf5db52 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 17 Apr 2014 19:48:07 +0200 Subject: PCI: Remove pcibios_add_platform_entries() Remove pcibios_add_platform_entries(). Architecture-specific attributes can be achieved by setting pdev->dev.groups. Link: https://lkml.kernel.org/r/alpine.LFD.2.11.1404141101500.1529@denkbrett Signed-off-by: Sebastian Ott Signed-off-by: Bjorn Helgaas Acked-by: Greg Kroah-Hartman --- drivers/pci/pci-sysfs.c | 10 ---------- include/linux/pci.h | 1 - 2 files changed, 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 3db1c7ff5dd3..b7333fa5f80d 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1273,11 +1273,6 @@ static struct bin_attribute pcie_config_attr = { .write = pci_write_config, }; -int __weak pcibios_add_platform_entries(struct pci_dev *dev) -{ - return 0; -} - static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1393,11 +1388,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) pdev->rom_attr = attr; } - /* add platform-specific attributes */ - retval = pcibios_add_platform_entries(pdev); - if (retval) - goto err_rom_file; - /* add sysfs entries for various capabilities */ retval = pci_create_capabilities_sysfs(pdev); if (retval) diff --git a/include/linux/pci.h b/include/linux/pci.h index a95aac7ad37f..84182b153b21 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1572,7 +1572,6 @@ extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mem_size; /* Architecture-specific versions may override these (weak) */ -int pcibios_add_platform_entries(struct pci_dev *dev); void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); int pcibios_set_pcie_reset_state(struct pci_dev *dev, -- cgit v1.2.3 From 11d200e95f3e84c1102e4cc9863a3614fd41f3ad Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Fri, 14 Mar 2014 17:00:14 +0000 Subject: lib: add glibc style strchrnul() variant The strchrnul() variant helpfully returns a pointer to the end of
the string instead of a NULL if the requested character is not found. This can simplify string parsing code since it doesn't need to explicitly check for a NULL return. If a valid string pointer is passed in, then a valid null terminated string will always come back out. Signed-off-by: Grant Likely --- include/linux/string.h | 3 +++ lib/string.c | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/include/linux/string.h b/include/linux/string.h index ac889c5ea11b..d36977e029af 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -52,6 +52,9 @@ extern int strncasecmp(const char *s1, const char *s2, size_t n); #ifndef __HAVE_ARCH_STRCHR extern char * strchr(const char *,int); #endif +#ifndef __HAVE_ARCH_STRCHRNUL +extern char * strchrnul(const char *,int); +#endif #ifndef __HAVE_ARCH_STRNCHR extern char * strnchr(const char *, size_t, int); #endif diff --git a/lib/string.c b/lib/string.c index 9b1f9062a202..e0c20eb362f0 100644 --- a/lib/string.c +++ b/lib/string.c @@ -301,6 +301,24 @@ char *strchr(const char *s, int c) EXPORT_SYMBOL(strchr); #endif +#ifndef __HAVE_ARCH_STRCHRNUL +/** + * strchrnul - Find and return a character in a string, or end of string + * @s: The string to be searched + * @c: The character to search for + * + * Returns pointer to first occurrence of 'c' in s. If c is not found, then + * return a pointer to the null byte at the end of s. + */ +char *strchrnul(const char *s, int c) +{ + while (*s && *s != (char)c) + s++; + return (char *)s; +} +EXPORT_SYMBOL(strchrnul); +#endif + #ifndef __HAVE_ARCH_STRRCHR /** * strrchr - Find the last occurrence of a character in a string -- cgit v1.2.3 From ad69674e73a18dc3a8da557f4059ccf9389531a5 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 20 May 2014 13:42:02 +0300 Subject: of/irq: do irq resolution in platform_get_irq_byname() The commit 9ec36cafe43bf835f8f29273597a5b0cbc8267ef "of/irq: do irq resolution in platform_get_irq" from Rob Herring moves resolution of the interrupt resources into platform_get_irq(). But this solution isn't complete because platform_get_irq_byname() needs to be modified the same way. Hence, fix it by adding the interrupt resolution code to the platform_get_irq_byname() function too. Cc: Russell King Cc: Rob Herring Cc: Tony Lindgren Cc: Grant Likely Cc: Thierry Reding Signed-off-by: Grygorii Strashko Signed-off-by: Grant Likely --- drivers/base/platform.c | 7 +++++-- drivers/of/irq.c | 22 ++++++++++++++++++++++ include/linux/of_irq.h | 5 +++++ 3 files changed, 32 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 5b47210889e0..9e9227e1762d 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -131,9 +131,12 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname); */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { - struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, - name); + struct resource *r; + + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) + return of_irq_get_byname(dev->dev.of_node, name); + r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ?
r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 5aeb89411350..3e06a699352d 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -405,6 +405,28 @@ int of_irq_get(struct device_node *dev, int index) return irq_create_of_mapping(&oirq); } +/** + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number + * @dev: pointer to device tree node + * @name: irq name + * + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain + * is not yet created, or error code in case of any other failure. + */ +int of_irq_get_byname(struct device_node *dev, const char *name) +{ + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(dev, "interrupt-names", name); + if (index < 0) + return index; + + return of_irq_get(dev, index); +} + /** * of_irq_count - Count the number of IRQs a node uses * @dev: pointer to device tree node diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 6404253d810d..bfec136a6d1e 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -45,6 +45,7 @@ extern void of_irq_init(const struct of_device_id *matches); #ifdef CONFIG_OF_IRQ extern int of_irq_count(struct device_node *dev); extern int of_irq_get(struct device_node *dev, int index); +extern int of_irq_get_byname(struct device_node *dev, const char *name); #else static inline int of_irq_count(struct device_node *dev) { @@ -54,6 +55,10 @@ static inline int of_irq_get(struct device_node *dev, int index) { return 0; } +static inline int of_irq_get_byname(struct device_node *dev, const char *name) +{ + return 0; +} #endif #if defined(CONFIG_OF) -- cgit v1.2.3 From 9e9dc7d9597bd6881b3e7ae6ae3d710319605c47 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 8 May 2014 23:16:34 +0200 Subject: mfd: stmpe: root out static GPIO and IRQ assignments The only platform using the STMPE expander now boots from device tree using all-dynamic GPIO and IRQ number assignments, so remove the mechanism for passing these static assignments through platform data entirely.
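In practice the all-dynamic setup amounts to two changes, both visible in the diff below: the gpio_chip is registered with base -1 so gpiolib picks the GPIO range, and the irqdomain is created with a first_irq of 0 so virtual IRQ numbers are allocated on demand. A condensed sketch, identifiers as in the patch:

    /* gpio-stmpe.c: no static GPIO base, let gpiolib choose one */
    stmpe_gpio->chip.base = -1;

    /* no static IRQ base either; first_irq == 0 means dynamic mapping */
    stmpe_gpio->domain = irq_domain_add_simple(np, stmpe_gpio->chip.ngpio, 0,
                                               &stmpe_gpio_irq_simple_ops, stmpe_gpio);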
Signed-off-by: Linus Walleij --- drivers/gpio/gpio-stmpe.c | 18 +++++------------- drivers/mfd/stmpe.c | 6 +----- include/linux/mfd/stmpe.h | 14 -------------- 3 files changed, 6 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 2776a09bee58..628b58494294 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -23,7 +23,8 @@ enum { REG_RE, REG_FE, REG_IE }; #define CACHE_NR_REGS 3 -#define CACHE_NR_BANKS (STMPE_NR_GPIOS / 8) +/* No variant has more than 24 GPIOs */ +#define CACHE_NR_BANKS (24 / 8) struct stmpe_gpio { struct gpio_chip chip; @@ -31,8 +32,6 @@ struct stmpe_gpio { struct device *dev; struct mutex irq_lock; struct irq_domain *domain; - - int irq_base; unsigned norequest_mask; /* Caches of interrupt control registers for bus_lock */ @@ -311,13 +310,8 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, struct device_node *np) { - int base = 0; - - if (!np) - base = stmpe_gpio->irq_base; - stmpe_gpio->domain = irq_domain_add_simple(np, - stmpe_gpio->chip.ngpio, base, + stmpe_gpio->chip.ngpio, 0, &stmpe_gpio_irq_simple_ops, stmpe_gpio); if (!stmpe_gpio->domain) { dev_err(stmpe_gpio->dev, "failed to create irqdomain\n"); @@ -354,7 +348,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) #ifdef CONFIG_OF stmpe_gpio->chip.of_node = np; #endif - stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; + stmpe_gpio->chip.base = -1; if (pdata) stmpe_gpio->norequest_mask = pdata->norequest_mask; @@ -362,9 +356,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) of_property_read_u32(np, "st,norequest-mask", &stmpe_gpio->norequest_mask); - if (irq >= 0) - stmpe_gpio->irq_base = stmpe->irq_base + STMPE_INT_GPIO(0); - else + if (irq < 0) dev_info(&pdev->dev, "device configured in no-irq mode; " "irqs are not available\n"); diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 294731be1a15..3b6bfa7184ad 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -996,9 +996,6 @@ static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np) int base = 0; int num_irqs = stmpe->variant->num_irqs; - if (!np) - base = stmpe->irq_base; - stmpe->domain = irq_domain_add_simple(np, num_irqs, base, &stmpe_irq_ops, stmpe); if (!stmpe->domain) { @@ -1077,7 +1074,7 @@ static int stmpe_chip_init(struct stmpe *stmpe) static int stmpe_add_device(struct stmpe *stmpe, const struct mfd_cell *cell) { return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1, - NULL, stmpe->irq_base, stmpe->domain); + NULL, 0, stmpe->domain); } static int stmpe_devices_init(struct stmpe *stmpe) @@ -1181,7 +1178,6 @@ int stmpe_probe(struct stmpe_client_info *ci, int partnum) stmpe->dev = ci->dev; stmpe->client = ci->client; stmpe->pdata = pdata; - stmpe->irq_base = pdata->irq_base; stmpe->ci = ci; stmpe->partnum = partnum; stmpe->variant = stmpe_variant_info[partnum]; diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index 980898620e57..575a86c7fcbd 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h @@ -76,7 +76,6 @@ struct stmpe_client_info; * @regs: list of addresses of registers which are at different addresses on * different variants. Indexed by one of STMPE_IDX_*. 
* @irq: irq number for stmpe - * @irq_base: starting IRQ number for internal IRQs * @num_gpios: number of gpios, differs for variants * @ier: cache of IER registers for bus_lock * @oldier: cache of IER registers for bus_lock @@ -96,7 +95,6 @@ struct stmpe { const u8 *regs; int irq; - int irq_base; int num_gpios; u8 ier[2]; u8 oldier[2]; @@ -137,8 +135,6 @@ struct stmpe_keypad_platform_data { /** * struct stmpe_gpio_platform_data - STMPE GPIO platform data - * @gpio_base: first gpio number assigned. A maximum of - * %STMPE_NR_GPIOS GPIOs will be allocated. * @norequest_mask: bitmask specifying which GPIOs should _not_ be * requestable due to different usage (e.g. touch, keypad) * STMPE_GPIO_NOREQ_* macros can be used here. @@ -146,7 +142,6 @@ struct stmpe_keypad_platform_data { * @remove: board specific remove callback */ struct stmpe_gpio_platform_data { - int gpio_base; unsigned norequest_mask; void (*setup)(struct stmpe *stmpe, unsigned gpio_base); void (*remove)(struct stmpe *stmpe, unsigned gpio_base); @@ -200,8 +195,6 @@ struct stmpe_ts_platform_data { * @irq_trigger: IRQ trigger to use for the interrupt to the host * @autosleep: bool to enable/disable stmpe autosleep * @autosleep_timeout: inactivity timeout in milliseconds for autosleep - * @irq_base: base IRQ number. %STMPE_NR_IRQS irqs will be used, or - * %STMPE_NR_INTERNAL_IRQS if the GPIO driver is not used. * @irq_over_gpio: true if gpio is used to get irq * @irq_gpio: gpio number over which irq will be requested (significant only if * irq_over_gpio is true) @@ -212,7 +205,6 @@ struct stmpe_ts_platform_data { struct stmpe_platform_data { int id; unsigned int blocks; - int irq_base; unsigned int irq_trigger; bool autosleep; bool irq_over_gpio; @@ -224,10 +216,4 @@ struct stmpe_platform_data { struct stmpe_ts_platform_data *ts; }; -#define STMPE_NR_INTERNAL_IRQS 9 -#define STMPE_INT_GPIO(x) (STMPE_NR_INTERNAL_IRQS + (x)) - -#define STMPE_NR_GPIOS 24 -#define STMPE_NR_IRQS STMPE_INT_GPIO(STMPE_NR_GPIOS) - #endif -- cgit v1.2.3 From 43339bed7010da6e7cf797db3216a136a974a0cd Mon Sep 17 00:00:00 2001 From: Eli Billauer Date: Fri, 16 May 2014 11:26:35 +0300 Subject: devres: Add devm_get_free_pages API devm_get_free_pages() and devm_free_pages() are the managed counterparts for __get_free_pages() and free_pages(). 
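As a usage illustration only (foo_probe() and the order-1 size are made up and not part of this patch), a driver can tie the allocation's lifetime to the device and drop the explicit free_pages() calls from its error and remove paths:

    static int foo_probe(struct platform_device *pdev)
    {
            unsigned long buf;

            /* two pages (order 1), freed automatically on driver detach */
            buf = devm_get_free_pages(&pdev->dev, GFP_KERNEL, 1);
            if (!buf)
                    return -ENOMEM;

            /* ... use buf ...; devm_free_pages(&pdev->dev, buf) is only
             * needed if the buffer must be released early */
            return 0;
    }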
Signed-off-by: Eli Billauer Acked-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- Documentation/driver-model/devres.txt | 2 + drivers/base/devres.c | 76 +++++++++++++++++++++++++++++++++++ include/linux/device.h | 4 ++ 3 files changed, 82 insertions(+) (limited to 'include/linux') diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 499951873997..e1a27074caae 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -237,6 +237,8 @@ MEM devm_kzalloc() devm_kfree() devm_kmemdup() + devm_get_free_pages() + devm_free_pages() IIO devm_iio_device_alloc() diff --git a/drivers/base/devres.c b/drivers/base/devres.c index d0914cba2413..52302946770f 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -852,3 +852,79 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) return p; } EXPORT_SYMBOL_GPL(devm_kmemdup); + +struct pages_devres { + unsigned long addr; + unsigned int order; +}; + +static int devm_pages_match(struct device *dev, void *res, void *p) +{ + struct pages_devres *devres = res; + struct pages_devres *target = p; + + return devres->addr == target->addr; +} + +static void devm_pages_release(struct device *dev, void *res) +{ + struct pages_devres *devres = res; + + free_pages(devres->addr, devres->order); +} + +/** + * devm_get_free_pages - Resource-managed __get_free_pages + * @dev: Device to allocate memory for + * @gfp_mask: Allocation gfp flags + * @order: Allocation size is (1 << order) pages + * + * Managed get_free_pages. Memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Address of allocated memory on success, 0 on failure. + */ + +unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order) +{ + struct pages_devres *devres; + unsigned long addr; + + addr = __get_free_pages(gfp_mask, order); + + if (unlikely(!addr)) + return 0; + + devres = devres_alloc(devm_pages_release, + sizeof(struct pages_devres), GFP_KERNEL); + if (unlikely(!devres)) { + free_pages(addr, order); + return 0; + } + + devres->addr = addr; + devres->order = order; + + devres_add(dev, devres); + return addr; +} +EXPORT_SYMBOL_GPL(devm_get_free_pages); + +/** + * devm_free_pages - Resource-managed free_pages + * @dev: Device this memory belongs to + * @addr: Memory to free + * + * Free memory allocated with devm_get_free_pages(). Unlike free_pages, + * there is no need to supply the @order. 
+ */ +void devm_free_pages(struct device *dev, unsigned long addr) +{ + struct pages_devres devres = { .addr = addr }; + + WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, + &devres)); +} +EXPORT_SYMBOL_GPL(devm_free_pages); diff --git a/include/linux/device.h b/include/linux/device.h index ab871588da89..3dc69a2faa51 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -626,6 +626,10 @@ extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); +extern unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +extern void devm_free_pages(struct device *dev, unsigned long addr); + void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); void __iomem *devm_request_and_ioremap(struct device *dev, struct resource *res); -- cgit v1.2.3 From 9f8c0fe9542141fd0008d5c0f6ae365890f6da94 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Fri, 23 May 2014 16:44:10 +0100 Subject: regulator: Constify the pointer to alias name array Toughen-up checks for read-only regulator names. Signed-off-by: Lee Jones Signed-off-by: Mark Brown --- drivers/regulator/core.c | 7 ++++--- drivers/regulator/devres.c | 6 +++--- include/linux/mfd/core.h | 2 +- include/linux/regulator/consumer.h | 36 ++++++++++++++++++++---------------- 4 files changed, 28 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9a09f3cdbabb..ba28d29b66d2 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1597,9 +1597,10 @@ EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias); * registered any aliases that were registered will be removed * before returning to the caller. */ -int regulator_bulk_register_supply_alias(struct device *dev, const char **id, +int regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, struct device *alias_dev, - const char **alias_id, + const char *const *alias_id, int num_id) { int i; @@ -1637,7 +1638,7 @@ EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias); * aliases in one operation. */ void regulator_bulk_unregister_supply_alias(struct device *dev, - const char **id, + const char *const *id, int num_id) { int i; diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c index f44818b838dc..8f785bc9e510 100644 --- a/drivers/regulator/devres.c +++ b/drivers/regulator/devres.c @@ -360,9 +360,9 @@ EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias); * will be removed before returning to the caller. */ int devm_regulator_bulk_register_supply_alias(struct device *dev, - const char **id, + const char *const *id, struct device *alias_dev, - const char **alias_id, + const char *const *alias_id, int num_id) { int i; @@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias); * will ensure that the resource is freed. 
*/ void devm_regulator_bulk_unregister_supply_alias(struct device *dev, - const char **id, + const char *const *id, int num_id) { int i; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bdba8c61207b..f543de91ce19 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -63,7 +63,7 @@ struct mfd_cell { /* A list of regulator supplies that should be mapped to the MFD * device rather than the child device when requested */ - const char **parent_supplies; + const char * const *parent_supplies; int num_parent_supplies; }; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index e530681bea70..10d0a53f4cd3 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -151,11 +151,13 @@ int regulator_register_supply_alias(struct device *dev, const char *id, const char *alias_id); void regulator_unregister_supply_alias(struct device *dev, const char *id); -int regulator_bulk_register_supply_alias(struct device *dev, const char **id, +int regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, struct device *alias_dev, - const char **alias_id, int num_id); + const char *const *alias_id, + int num_id); void regulator_bulk_unregister_supply_alias(struct device *dev, - const char **id, int num_id); + const char * const *id, int num_id); int devm_regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, @@ -164,12 +166,12 @@ void devm_regulator_unregister_supply_alias(struct device *dev, const char *id); int devm_regulator_bulk_register_supply_alias(struct device *dev, - const char **id, + const char *const *id, struct device *alias_dev, - const char **alias_id, + const char *const *alias_id, int num_id); void devm_regulator_bulk_unregister_supply_alias(struct device *dev, - const char **id, + const char *const *id, int num_id); /* regulator output control and status */ @@ -290,17 +292,17 @@ static inline void regulator_unregister_supply_alias(struct device *dev, } static inline int regulator_bulk_register_supply_alias(struct device *dev, - const char **id, - struct device *alias_dev, - const char **alias_id, - int num_id) + const char *const *id, + struct device *alias_dev, + const char * const *alias_id, + int num_id) { return 0; } static inline void regulator_bulk_unregister_supply_alias(struct device *dev, - const char **id, - int num_id) + const char * const *id, + int num_id) { } @@ -317,15 +319,17 @@ static inline void devm_regulator_unregister_supply_alias(struct device *dev, { } -static inline int devm_regulator_bulk_register_supply_alias( - struct device *dev, const char **id, struct device *alias_dev, - const char **alias_id, int num_id) +static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, + struct device *alias_dev, + const char *const *alias_id, + int num_id) { return 0; } static inline void devm_regulator_bulk_unregister_supply_alias( - struct device *dev, const char **id, int num_id) + struct device *dev, const char *const *id, int num_id) { } -- cgit v1.2.3 From edf866b3805c5651bf7d035b72dc0190cb6ff4a7 Mon Sep 17 00:00:00 2001 From: Sam Bradshaw Date: Fri, 23 May 2014 13:30:16 -0600 Subject: blk-mq: export blk_mq_tag_busy_iter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Export the blk-mq in-flight tag iterator for driver consumption. 
This is particularly useful in exception paths or SRSI where in-flight IOs need to be cancelled and/or reissued. The NVMe driver conversion will use this. Signed-off-by: Sam Bradshaw Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 1 + block/blk-mq-tag.h | 1 - include/linux/blk-mq.h | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index f6dea968b710..05e2baf4fa0d 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -400,6 +400,7 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, fn(data, tag_map); kfree(tag_map); } +EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e7ff5ceeeb97..2e5e6872d089 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -51,7 +51,6 @@ extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved); extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); -extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4d2800567aad..f76bb18350af 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -181,6 +181,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); +void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); /* * Driver command data is immediately after the request. So subtract request -- cgit v1.2.3 From 79c6ab509558f9871175c7e4411f857de12cf33b Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Fri, 23 May 2014 18:32:15 +0530 Subject: clk: divider: add CLK_DIVIDER_READ_ONLY flag From: Heiko Stuebner Similar to muxes which already have a read-only flag there sometimes exist dividers which should not be changed by the clock framework but whose value still should be readable. 
Therefore add a READ_ONLY flag, similar to the mux one, to clk-divider. Signed-off-by: Heiko Stuebner [changed flag bit to BIT(5) as suggested by Tomasz Figa] Signed-off-by: Thomas Abraham Acked-by: Tomasz Figa Acked-by: Max Schwarz Tested-by: Max Schwarz Signed-off-by: Mike Turquette --- drivers/clk/clk-divider.c | 10 +++++++++- include/linux/clk-provider.h | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index b3c83966be18..c9343f5d9918 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -361,6 +361,11 @@ const struct clk_ops clk_divider_ops = { }; EXPORT_SYMBOL_GPL(clk_divider_ops); +const struct clk_ops clk_divider_ro_ops = { + .recalc_rate = clk_divider_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_divider_ro_ops); + static struct clk *_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, @@ -386,7 +391,10 @@ static struct clk *_register_divider(struct device *dev, const char *name, } init.name = name; - init.ops = &clk_divider_ops; + if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) + init.ops = &clk_divider_ro_ops; + else + init.ops = &clk_divider_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 40809431641e..c7135dbbcd65 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -320,6 +320,8 @@ struct clk_div_table { * updated to indicate changing divider bits. * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider to be rounded * to the closest integer instead of the up one. + * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should + * not be changed by the clock framework. */ struct clk_divider { struct clk_hw hw; @@ -336,8 +338,10 @@ struct clk_divider { #define CLK_DIVIDER_ALLOW_ZERO BIT(2) #define CLK_DIVIDER_HIWORD_MASK BIT(3) #define CLK_DIVIDER_ROUND_CLOSEST BIT(4) +#define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; +extern const struct clk_ops clk_divider_ro_ops; struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, -- cgit v1.2.3 From 49b2f4c56fbf70ca693d6df1c491f0566d516aea Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Tue, 15 Apr 2014 08:35:25 -0300 Subject: [media] exynos4-is: Remove support for non-dt platforms All platforms supported by this driver are going to get device tree support in this kernel release, so remove code that would otherwise no longer be used.
Signed-off-by: Sylwester Nawrocki Acked-by: Kyungmin Park Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/fimc.txt | 30 -- MAINTAINERS | 1 - drivers/media/platform/exynos4-is/Kconfig | 3 +- drivers/media/platform/exynos4-is/common.c | 2 +- drivers/media/platform/exynos4-is/fimc-core.h | 2 +- drivers/media/platform/exynos4-is/fimc-isp-video.c | 2 +- drivers/media/platform/exynos4-is/fimc-isp.h | 2 +- drivers/media/platform/exynos4-is/fimc-lite-reg.c | 2 +- drivers/media/platform/exynos4-is/fimc-lite.c | 2 +- drivers/media/platform/exynos4-is/fimc-lite.h | 2 +- drivers/media/platform/exynos4-is/fimc-reg.c | 2 +- drivers/media/platform/exynos4-is/media-dev.c | 329 ++------------------- drivers/media/platform/exynos4-is/media-dev.h | 6 +- drivers/media/platform/exynos4-is/mipi-csis.c | 43 +-- include/linux/platform_data/mipi-csis.h | 28 -- include/media/exynos-fimc.h | 161 ++++++++++ include/media/s5p_fimc.h | 182 ------------ 17 files changed, 211 insertions(+), 588 deletions(-) delete mode 100644 include/linux/platform_data/mipi-csis.h create mode 100644 include/media/exynos-fimc.h delete mode 100644 include/media/s5p_fimc.h (limited to 'include/linux') diff --git a/Documentation/video4linux/fimc.txt b/Documentation/video4linux/fimc.txt index 7d6e160724bd..e0c6b8bc4743 100644 --- a/Documentation/video4linux/fimc.txt +++ b/Documentation/video4linux/fimc.txt @@ -140,39 +140,9 @@ You can either grep through the kernel log to find relevant information, i.e. or retrieve the information from /dev/media? with help of the media-ctl tool: # media-ctl -p -6. Platform support -=================== - -The machine code (arch/arm/plat-samsung and arch/arm/mach-*) must select -following options: - -CONFIG_S5P_DEV_FIMC0 mandatory -CONFIG_S5P_DEV_FIMC1 \ -CONFIG_S5P_DEV_FIMC2 | optional -CONFIG_S5P_DEV_FIMC3 | -CONFIG_S5P_SETUP_FIMC / -CONFIG_S5P_DEV_CSIS0 \ optional for MIPI-CSI interface -CONFIG_S5P_DEV_CSIS1 / - -Except that, relevant s5p_device_fimc? should be registered in the machine code -in addition to a "s5p-fimc-md" platform device to which the media device driver -is bound. The "s5p-fimc-md" device instance is required even if only mem-to-mem -operation is used. - -The description of sensor(s) attached to FIMC/MIPI-CSIS camera inputs should be -passed as the "s5p-fimc-md" device platform_data. The platform data structure -is defined in file include/media/s5p_fimc.h. - 7. Build ======== -This driver depends on following config options: -PLAT_S5P, -PM_RUNTIME, -I2C, -REGULATOR, -VIDEO_V4L2_SUBDEV_API, - If the driver is built as a loadable kernel module (CONFIG_VIDEO_SAMSUNG_S5P_FIMC=m) two modules are created (in addition to the core v4l2 modules): s5p-fimc.ko and optional s5p-csis.ko (MIPI-CSI receiver subdev). 
diff --git a/MAINTAINERS b/MAINTAINERS index 129621ed165f..6b7c633a2e98 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7654,7 +7654,6 @@ L: linux-media@vger.kernel.org Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Supported F: drivers/media/platform/exynos4-is/ -F: include/media/s5p_fimc.h SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER M: Sylwester Nawrocki diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig index e1b2ceba00c1..5dcaa0a80540 100644 --- a/drivers/media/platform/exynos4-is/Kconfig +++ b/drivers/media/platform/exynos4-is/Kconfig @@ -3,6 +3,7 @@ config VIDEO_SAMSUNG_EXYNOS4_IS bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on (PLAT_S5P || ARCH_EXYNOS) + depends on OF && COMMON_CLK help Say Y here to enable camera host interface devices for Samsung S5P and EXYNOS SoC series. @@ -17,7 +18,7 @@ config VIDEO_S5P_FIMC depends on I2C select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV - select MFD_SYSCON if OF + select MFD_SYSCON select VIDEO_EXYNOS4_IS_COMMON help This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/exynos4-is/common.c index 0ec210b4da1d..0eb34ecb8ee4 100644 --- a/drivers/media/platform/exynos4-is/common.c +++ b/drivers/media/platform/exynos4-is/common.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include "common.h" /* Called with the media graph mutex held or entity->stream_count > 0. */ diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h index 1790fb4e32ea..6c75c6ced1f7 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.h +++ b/drivers/media/platform/exynos4-is/fimc-core.h @@ -27,7 +27,7 @@ #include #include #include -#include +#include #define dbg(fmt, args...) 
\ pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args) diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index ced46600e343..93f9cf2ebcd6 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include "common.h" #include "media-dev.h" diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h index 4dc55a18d978..b99be09b49fc 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.h +++ b/drivers/media/platform/exynos4-is/fimc-isp.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include extern int fimc_isp_debug; diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c index d0dc7ee04452..bc3ec7d25a32 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c +++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "fimc-lite-reg.h" #include "fimc-lite.h" diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index 630aef52dbb8..a97d2352f1d7 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include "common.h" #include "fimc-core.h" diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h index 7428b2d22b52..ea19dc7be63e 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.h +++ b/drivers/media/platform/exynos4-is/fimc-lite.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #define FIMC_LITE_DRV_NAME "exynos-fimc-lite" #define FLITE_CLK_NAME "flite" diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c index 1db8cb4c46ef..2d77fd8f440a 100644 --- a/drivers/media/platform/exynos4-is/fimc-reg.c +++ b/drivers/media/platform/exynos4-is/fimc-reg.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include "media-dev.h" #include "fimc-reg.h" diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 6e2d6042ade6..344718df5c62 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include "media-dev.h" #include "fimc-core.h" @@ -39,10 +39,6 @@ #include "fimc-lite.h" #include "mipi-csis.h" -static int __fimc_md_set_camclk(struct fimc_md *fmd, - struct fimc_source_info *si, - bool on); - /* Set up image sensor subdev -> FIMC capture node notifications. 
*/ static void __setup_sensor_notification(struct fimc_md *fmd, struct v4l2_subdev *sensor, @@ -223,17 +219,10 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep, return ret; } - ret = fimc_md_set_camclk(sd, true); - if (ret < 0) - goto err_wbclk; - ret = fimc_pipeline_s_power(p, 1); if (!ret) return 0; - fimc_md_set_camclk(sd, false); - -err_wbclk: if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]); @@ -259,7 +248,6 @@ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep) } ret = fimc_pipeline_s_power(p, 0); - fimc_md_set_camclk(sd, false); fmd = entity_to_fimc_mdev(&sd->entity); @@ -337,75 +325,14 @@ static void fimc_md_pipelines_free(struct fimc_md *fmd) } } -/* - * Sensor subdevice helper functions - */ -static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd, - struct fimc_source_info *si) -{ - struct i2c_adapter *adapter; - struct v4l2_subdev *sd = NULL; - - if (!si || !fmd) - return NULL; - /* - * If FIMC bus type is not Writeback FIFO assume it is same - * as sensor_bus_type. - */ - si->fimc_bus_type = si->sensor_bus_type; - - adapter = i2c_get_adapter(si->i2c_bus_num); - if (!adapter) { - v4l2_warn(&fmd->v4l2_dev, - "Failed to get I2C adapter %d, deferring probe\n", - si->i2c_bus_num); - return ERR_PTR(-EPROBE_DEFER); - } - sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter, - si->board_info, NULL); - if (IS_ERR_OR_NULL(sd)) { - i2c_put_adapter(adapter); - v4l2_warn(&fmd->v4l2_dev, - "Failed to acquire subdev %s, deferring probe\n", - si->board_info->type); - return ERR_PTR(-EPROBE_DEFER); - } - v4l2_set_subdev_hostdata(sd, si); - sd->grp_id = GRP_ID_SENSOR; - - v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n", - sd->name); - return sd; -} - -static void fimc_md_unregister_sensor(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct i2c_adapter *adapter; - - if (!client || client->dev.of_node) - return; - - v4l2_device_unregister_subdev(sd); - - adapter = client->adapter; - i2c_unregister_device(client); - if (adapter) - i2c_put_adapter(adapter); -} - -#ifdef CONFIG_OF /* Parse port node and register as a sub-device any sensor specified there. */ static int fimc_md_parse_port_node(struct fimc_md *fmd, struct device_node *port, unsigned int index) { + struct fimc_source_info *pd = &fmd->sensor[index].pdata; struct device_node *rem, *ep, *np; - struct fimc_source_info *pd; struct v4l2_of_endpoint endpoint; - u32 val; - - pd = &fmd->sensor[index].pdata; /* Assume here a port node can have only one endpoint node. 
*/ ep = of_get_next_child(port, NULL); @@ -425,20 +352,6 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, ep->full_name); return 0; } - if (!of_property_read_u32(rem, "samsung,camclk-out", &val)) - pd->clk_id = val; - - if (!of_property_read_u32(rem, "clock-frequency", &val)) - pd->clk_frequency = val; - else - pd->clk_frequency = DEFAULT_SENSOR_CLK_FREQ; - - if (pd->clk_frequency == 0) { - v4l2_err(&fmd->v4l2_dev, "Wrong clock frequency at node %s\n", - rem->full_name); - of_node_put(rem); - return -EINVAL; - } if (fimc_input_is_parallel(endpoint.base.port)) { if (endpoint.bus_type == V4L2_MBUS_PARALLEL) @@ -485,14 +398,26 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, } /* Register all SoC external sub-devices */ -static int fimc_md_of_sensors_register(struct fimc_md *fmd, - struct device_node *np) +static int fimc_md_register_sensor_entities(struct fimc_md *fmd) { struct device_node *parent = fmd->pdev->dev.of_node; struct device_node *node, *ports; int index = 0; int ret; + /* + * Runtime resume one of the FIMC entities to make sure + * the sclk_cam clocks are not globally disabled. + */ + if (!fmd->pmf) + return -ENXIO; + + ret = pm_runtime_get_sync(fmd->pmf); + if (ret < 0) + return ret; + + fmd->num_sensors = 0; + /* Attach sensors linked to MIPI CSI-2 receivers */ for_each_available_child_of_node(parent, node) { struct device_node *port; @@ -506,14 +431,14 @@ static int fimc_md_of_sensors_register(struct fimc_md *fmd, ret = fimc_md_parse_port_node(fmd, port, index); if (ret < 0) - return ret; + goto rpm_put; index++; } /* Attach sensors listed in the parallel-ports node */ ports = of_get_child_by_name(parent, "parallel-ports"); if (!ports) - return 0; + goto rpm_put; for_each_child_of_node(ports, node) { ret = fimc_md_parse_port_node(fmd, node, index); @@ -521,8 +446,9 @@ static int fimc_md_of_sensors_register(struct fimc_md *fmd, break; index++; } - - return 0; +rpm_put: + pm_runtime_put(fmd->pmf); + return ret; } static int __of_get_csis_id(struct device_node *np) @@ -535,68 +461,10 @@ static int __of_get_csis_id(struct device_node *np) of_property_read_u32(np, "reg", ®); return reg - FIMC_INPUT_MIPI_CSI2_0; } -#else -#define fimc_md_of_sensors_register(fmd, np) (-ENOSYS) -#define __of_get_csis_id(np) (-ENOSYS) -#endif - -static int fimc_md_register_sensor_entities(struct fimc_md *fmd) -{ - struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data; - struct device_node *of_node = fmd->pdev->dev.of_node; - int num_clients = 0; - int ret, i; - - /* - * Runtime resume one of the FIMC entities to make sure - * the sclk_cam clocks are not globally disabled. 
- */ - if (!fmd->pmf) - return -ENXIO; - - ret = pm_runtime_get_sync(fmd->pmf); - if (ret < 0) - return ret; - - if (of_node) { - fmd->num_sensors = 0; - ret = fimc_md_of_sensors_register(fmd, of_node); - } else if (pdata) { - WARN_ON(pdata->num_clients > ARRAY_SIZE(fmd->sensor)); - num_clients = min_t(u32, pdata->num_clients, - ARRAY_SIZE(fmd->sensor)); - fmd->num_sensors = num_clients; - - for (i = 0; i < num_clients; i++) { - struct fimc_sensor_info *si = &fmd->sensor[i]; - struct v4l2_subdev *sd; - - si->pdata = pdata->source_info[i]; - ret = __fimc_md_set_camclk(fmd, &si->pdata, true); - if (ret) - break; - sd = fimc_md_register_sensor(fmd, &si->pdata); - ret = __fimc_md_set_camclk(fmd, &si->pdata, false); - - if (IS_ERR(sd)) { - si->subdev = NULL; - ret = PTR_ERR(sd); - break; - } - si->subdev = sd; - if (ret) - break; - } - } - - pm_runtime_put(fmd->pmf); - return ret; -} /* * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration. */ - static int register_fimc_lite_entity(struct fimc_md *fmd, struct fimc_lite *fimc_lite) { @@ -753,35 +621,9 @@ dev_unlock: return ret; } -static int fimc_md_pdev_match(struct device *dev, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - int plat_entity = -1; - int ret; - char *p; - - if (!get_device(dev)) - return -ENODEV; - - if (!strcmp(pdev->name, CSIS_DRIVER_NAME)) { - plat_entity = IDX_CSIS; - } else { - p = strstr(pdev->name, "fimc"); - if (p && *(p + 4) == 0) - plat_entity = IDX_FIMC; - } - - if (plat_entity >= 0) - ret = fimc_md_register_platform_entity(data, pdev, - plat_entity); - put_device(dev); - return 0; -} - /* Register FIMC, FIMC-LITE and CSIS media entities */ -#ifdef CONFIG_OF -static int fimc_md_register_of_platform_entities(struct fimc_md *fmd, - struct device_node *parent) +static int fimc_md_register_platform_entities(struct fimc_md *fmd, + struct device_node *parent) { struct device_node *node; int ret = 0; @@ -815,9 +657,6 @@ static int fimc_md_register_of_platform_entities(struct fimc_md *fmd, return ret; } -#else -#define fimc_md_register_of_platform_entities(fmd, node) (-ENOSYS) -#endif static void fimc_md_unregister_entities(struct fimc_md *fmd) { @@ -845,14 +684,6 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd) v4l2_device_unregister_subdev(fmd->csis[i].sd); fmd->csis[i].sd = NULL; } - if (fmd->pdev->dev.of_node == NULL) { - for (i = 0; i < fmd->num_sensors; i++) { - if (fmd->sensor[i].subdev == NULL) - continue; - fimc_md_unregister_sensor(fmd->sensor[i].subdev); - fmd->sensor[i].subdev = NULL; - } - } if (fmd->fimc_is) v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev); @@ -1137,7 +968,7 @@ static void fimc_md_put_clocks(struct fimc_md *fmd) static int fimc_md_get_clocks(struct fimc_md *fmd) { - struct device *dev = NULL; + struct device *dev = &fmd->pdev->dev; char clk_name[32]; struct clk *clock; int i, ret = 0; @@ -1145,16 +976,12 @@ static int fimc_md_get_clocks(struct fimc_md *fmd) for (i = 0; i < FIMC_MAX_CAMCLKS; i++) fmd->camclk[i].clock = ERR_PTR(-EINVAL); - if (fmd->pdev->dev.of_node) - dev = &fmd->pdev->dev; - for (i = 0; i < FIMC_MAX_CAMCLKS; i++) { snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i); clock = clk_get(dev, clk_name); if (IS_ERR(clock)) { - dev_err(&fmd->pdev->dev, "Failed to get clock: %s\n", - clk_name); + dev_err(dev, "Failed to get clock: %s\n", clk_name); ret = PTR_ERR(clock); break; } @@ -1188,86 +1015,6 @@ static int fimc_md_get_clocks(struct fimc_md *fmd) return ret; } -static int __fimc_md_set_camclk(struct fimc_md *fmd, - struct 
fimc_source_info *si, - bool on) -{ - struct fimc_camclk_info *camclk; - int ret = 0; - - /* - * When device tree is used the sensor drivers are supposed to - * control the clock themselves. This whole function will be - * removed once S5PV210 platform is converted to the device tree. - */ - if (fmd->pdev->dev.of_node) - return 0; - - if (WARN_ON(si->clk_id >= FIMC_MAX_CAMCLKS) || !fmd || !fmd->pmf) - return -EINVAL; - - camclk = &fmd->camclk[si->clk_id]; - - dbg("camclk %d, f: %lu, use_count: %d, on: %d", - si->clk_id, si->clk_frequency, camclk->use_count, on); - - if (on) { - if (camclk->use_count > 0 && - camclk->frequency != si->clk_frequency) - return -EINVAL; - - if (camclk->use_count++ == 0) { - clk_set_rate(camclk->clock, si->clk_frequency); - camclk->frequency = si->clk_frequency; - ret = pm_runtime_get_sync(fmd->pmf); - if (ret < 0) - return ret; - ret = clk_prepare_enable(camclk->clock); - dbg("Enabled camclk %d: f: %lu", si->clk_id, - clk_get_rate(camclk->clock)); - } - return ret; - } - - if (WARN_ON(camclk->use_count == 0)) - return 0; - - if (--camclk->use_count == 0) { - clk_disable_unprepare(camclk->clock); - pm_runtime_put(fmd->pmf); - dbg("Disabled camclk %d", si->clk_id); - } - return ret; -} - -/** - * fimc_md_set_camclk - peripheral sensor clock setup - * @sd: sensor subdev to configure sclk_cam clock for - * @on: 1 to enable or 0 to disable the clock - * - * There are 2 separate clock outputs available in the SoC for external - * image processors. These clocks are shared between all registered FIMC - * devices to which sensors can be attached, either directly or through - * the MIPI CSI receiver. The clock is allowed here to be used by - * multiple sensors concurrently if they use same frequency. - * This function should only be called when the graph mutex is held. - */ -int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on) -{ - struct fimc_source_info *si = v4l2_get_subdev_hostdata(sd); - struct fimc_md *fmd = entity_to_fimc_mdev(&sd->entity); - - /* - * If there is a clock provider registered the sensors will - * handle their clock themselves, no need to control it on - * the host interface side. 
- */ - if (fmd->clk_provider.num_clocks > 0) - return 0; - - return __fimc_md_set_camclk(fmd, si, on); -} - static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable) { struct exynos_video_entity *ve; @@ -1426,7 +1173,6 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) return 0; } -#ifdef CONFIG_OF static int cam_clk_prepare(struct clk_hw *hw) { struct cam_clk *camclk = to_cam_clk(hw); @@ -1518,10 +1264,6 @@ err: fimc_md_unregister_clk_provider(fmd); return ret; } -#else -#define fimc_md_register_clk_provider(fmd) (0) -#define fimc_md_unregister_clk_provider(fmd) -#endif static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, @@ -1585,8 +1327,8 @@ static int fimc_md_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&fmd->slock); - fmd->pdev = pdev; INIT_LIST_HEAD(&fmd->pipelines); + fmd->pdev = pdev; strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC", sizeof(fmd->media_dev.model)); @@ -1599,6 +1341,7 @@ static int fimc_md_probe(struct platform_device *pdev) strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name)); fmd->use_isp = fimc_md_is_isp_available(dev->of_node); + fmd->user_subdev_api = true; ret = v4l2_device_register(dev, &fmd->v4l2_dev); if (ret < 0) { @@ -1616,8 +1359,6 @@ static int fimc_md_probe(struct platform_device *pdev) if (ret) goto err_md; - fmd->user_subdev_api = (dev->of_node != NULL); - ret = fimc_md_get_pinctrl(fmd); if (ret < 0) { if (ret != EPROBE_DEFER) @@ -1630,22 +1371,16 @@ static int fimc_md_probe(struct platform_device *pdev) /* Protect the media graph while we're registering entities */ mutex_lock(&fmd->media_dev.graph_mutex); - if (dev->of_node) - ret = fimc_md_register_of_platform_entities(fmd, dev->of_node); - else - ret = bus_for_each_dev(&platform_bus_type, NULL, fmd, - fimc_md_pdev_match); + ret = fimc_md_register_platform_entities(fmd, dev->of_node); if (ret) { mutex_unlock(&fmd->media_dev.graph_mutex); goto err_clk; } - if (dev->platform_data || dev->of_node) { - ret = fimc_md_register_sensor_entities(fmd); - if (ret) { - mutex_unlock(&fmd->media_dev.graph_mutex); - goto err_m_ent; - } + ret = fimc_md_register_sensor_entities(fmd); + if (ret) { + mutex_unlock(&fmd->media_dev.graph_mutex); + goto err_m_ent; } mutex_unlock(&fmd->media_dev.graph_mutex); diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h index 58c49456b13f..03214541f149 100644 --- a/drivers/media/platform/exynos4-is/media-dev.h +++ b/drivers/media/platform/exynos4-is/media-dev.h @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "fimc-core.h" #include "fimc-lite.h" @@ -94,9 +94,7 @@ struct fimc_sensor_info { }; struct cam_clk { -#ifdef CONFIG_COMMON_CLK struct clk_hw hw; -#endif struct fimc_md *fmd; }; #define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw) @@ -144,9 +142,7 @@ struct fimc_md { struct cam_clk_provider { struct clk *clks[FIMC_MAX_CAMCLKS]; -#ifdef CONFIG_COMMON_CLK struct clk_onecell_data clk_data; -#endif struct device_node *of_node; struct cam_clk camclk[FIMC_MAX_CAMCLKS]; int num_clocks; diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 3678ba59725c..ae54ef5f535d 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -22,14 +22,13 @@ #include #include #include -#include #include #include #include #include #include #include -#include +#include #include #include @@ -730,26 +729,6 
@@ static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int s5pcsis_get_platform_data(struct platform_device *pdev, - struct csis_state *state) -{ - struct s5p_platform_mipi_csis *pdata = pdev->dev.platform_data; - - if (pdata == NULL) { - dev_err(&pdev->dev, "Platform data not specified\n"); - return -EINVAL; - } - - state->clk_frequency = pdata->clk_rate; - state->num_lanes = pdata->lanes; - state->hs_settle = pdata->hs_settle; - state->index = max(0, pdev->id); - state->max_num_lanes = state->index ? CSIS1_MAX_LANES : - CSIS0_MAX_LANES; - return 0; -} - -#ifdef CONFIG_OF static int s5pcsis_parse_dt(struct platform_device *pdev, struct csis_state *state) { @@ -787,9 +766,6 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, return 0; } -#else -#define s5pcsis_parse_dt(pdev, state) (-ENOSYS) -#endif static int s5pcsis_pm_resume(struct device *dev, bool runtime); static const struct of_device_id s5pcsis_of_match[]; @@ -812,19 +788,14 @@ static int s5pcsis_probe(struct platform_device *pdev) spin_lock_init(&state->slock); state->pdev = pdev; - if (dev->of_node) { - of_id = of_match_node(s5pcsis_of_match, dev->of_node); - if (WARN_ON(of_id == NULL)) - return -EINVAL; - - drv_data = of_id->data; - state->interrupt_mask = drv_data->interrupt_mask; + of_id = of_match_node(s5pcsis_of_match, dev->of_node); + if (WARN_ON(of_id == NULL)) + return -EINVAL; - ret = s5pcsis_parse_dt(pdev, state); - } else { - ret = s5pcsis_get_platform_data(pdev, state); - } + drv_data = of_id->data; + state->interrupt_mask = drv_data->interrupt_mask; + ret = s5pcsis_parse_dt(pdev, state); if (ret < 0) return ret; diff --git a/include/linux/platform_data/mipi-csis.h b/include/linux/platform_data/mipi-csis.h deleted file mode 100644 index c2fd9024717c..000000000000 --- a/include/linux/platform_data/mipi-csis.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd. - * - * Samsung S5P/Exynos SoC series MIPI CSIS device support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __PLAT_SAMSUNG_MIPI_CSIS_H_ -#define __PLAT_SAMSUNG_MIPI_CSIS_H_ __FILE__ - -/** - * struct s5p_platform_mipi_csis - platform data for S5P MIPI-CSIS driver - * @clk_rate: bus clock frequency - * @wclk_source: CSI wrapper clock selection: 0 - bus clock, 1 - ext. SCLK_CAM - * @lanes: number of data lanes used - * @hs_settle: HS-RX settle time - */ -struct s5p_platform_mipi_csis { - unsigned long clk_rate; - u8 wclk_source; - u8 lanes; - u8 hs_settle; -}; - -#endif /* __PLAT_SAMSUNG_MIPI_CSIS_H_ */ diff --git a/include/media/exynos-fimc.h b/include/media/exynos-fimc.h new file mode 100644 index 000000000000..aa44660e2041 --- /dev/null +++ b/include/media/exynos-fimc.h @@ -0,0 +1,161 @@ +/* + * Samsung S5P/Exynos4 SoC series camera interface driver header + * + * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd. + * Sylwester Nawrocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef S5P_FIMC_H_ +#define S5P_FIMC_H_ + +#include +#include +#include + +/* + * Enumeration of data inputs to the camera subsystem. 
+ */ +enum fimc_input { + FIMC_INPUT_PARALLEL_0 = 1, + FIMC_INPUT_PARALLEL_1, + FIMC_INPUT_MIPI_CSI2_0 = 3, + FIMC_INPUT_MIPI_CSI2_1, + FIMC_INPUT_WRITEBACK_A = 5, + FIMC_INPUT_WRITEBACK_B, + FIMC_INPUT_WRITEBACK_ISP = 5, +}; + +/* + * Enumeration of the FIMC data bus types. + */ +enum fimc_bus_type { + /* Camera parallel bus */ + FIMC_BUS_TYPE_ITU_601 = 1, + /* Camera parallel bus with embedded synchronization */ + FIMC_BUS_TYPE_ITU_656, + /* Camera MIPI-CSI2 serial bus */ + FIMC_BUS_TYPE_MIPI_CSI2, + /* FIFO link from LCD controller (WriteBack A) */ + FIMC_BUS_TYPE_LCD_WRITEBACK_A, + /* FIFO link from LCD controller (WriteBack B) */ + FIMC_BUS_TYPE_LCD_WRITEBACK_B, + /* FIFO link from FIMC-IS */ + FIMC_BUS_TYPE_ISP_WRITEBACK = FIMC_BUS_TYPE_LCD_WRITEBACK_B, +}; + +#define fimc_input_is_parallel(x) ((x) == 1 || (x) == 2) +#define fimc_input_is_mipi_csi(x) ((x) == 3 || (x) == 4) + +/* + * The subdevices' group IDs. + */ +#define GRP_ID_SENSOR (1 << 8) +#define GRP_ID_FIMC_IS_SENSOR (1 << 9) +#define GRP_ID_WRITEBACK (1 << 10) +#define GRP_ID_CSIS (1 << 11) +#define GRP_ID_FIMC (1 << 12) +#define GRP_ID_FLITE (1 << 13) +#define GRP_ID_FIMC_IS (1 << 14) + +/** + * struct fimc_source_info - video source description required for the host + * interface configuration + * + * @fimc_bus_type: FIMC camera input type + * @sensor_bus_type: image sensor bus type, MIPI, ITU-R BT.601 etc. + * @flags: the parallel sensor bus flags defining signals polarity (V4L2_MBUS_*) + * @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU) + */ +struct fimc_source_info { + enum fimc_bus_type fimc_bus_type; + enum fimc_bus_type sensor_bus_type; + u16 flags; + u16 mux_id; +}; + +/* + * v4l2_device notification id. This is only for internal use in the kernel. + * Sensor subdevs should issue S5P_FIMC_TX_END_NOTIFY notification in single + * frame capture mode when there is only one VSYNC pulse issued by the sensor + * at begining of the frame transmission. + */ +#define S5P_FIMC_TX_END_NOTIFY _IO('e', 0) + +#define FIMC_MAX_PLANES 3 + +/** + * struct fimc_fmt - color format data structure + * @mbus_code: media bus pixel code, -1 if not applicable + * @name: format description + * @fourcc: fourcc code for this format, 0 if not applicable + * @color: the driver's private color format id + * @memplanes: number of physically non-contiguous data planes + * @colplanes: number of physically contiguous data planes + * @colorspace: v4l2 colorspace (V4L2_COLORSPACE_*) + * @depth: per plane driver's private 'number of bits per pixel' + * @mdataplanes: bitmask indicating meta data plane(s), (1 << plane_no) + * @flags: flags indicating which operation mode format applies to + */ +struct fimc_fmt { + enum v4l2_mbus_pixelcode mbus_code; + char *name; + u32 fourcc; + u32 color; + u16 memplanes; + u16 colplanes; + u8 colorspace; + u8 depth[FIMC_MAX_PLANES]; + u16 mdataplanes; + u16 flags; +#define FMT_FLAGS_CAM (1 << 0) +#define FMT_FLAGS_M2M_IN (1 << 1) +#define FMT_FLAGS_M2M_OUT (1 << 2) +#define FMT_FLAGS_M2M (1 << 1 | 1 << 2) +#define FMT_HAS_ALPHA (1 << 3) +#define FMT_FLAGS_COMPRESSED (1 << 4) +#define FMT_FLAGS_WRITEBACK (1 << 5) +#define FMT_FLAGS_RAW_BAYER (1 << 6) +#define FMT_FLAGS_YUV (1 << 7) +}; + +struct exynos_media_pipeline; + +/* + * Media pipeline operations to be called from within a video node, i.e. the + * last entity within the pipeline. Implemented by related media device driver. 
+ */ +struct exynos_media_pipeline_ops { + int (*prepare)(struct exynos_media_pipeline *p, + struct media_entity *me); + int (*unprepare)(struct exynos_media_pipeline *p); + int (*open)(struct exynos_media_pipeline *p, struct media_entity *me, + bool resume); + int (*close)(struct exynos_media_pipeline *p); + int (*set_stream)(struct exynos_media_pipeline *p, bool state); +}; + +struct exynos_video_entity { + struct video_device vdev; + struct exynos_media_pipeline *pipe; +}; + +struct exynos_media_pipeline { + struct media_pipeline mp; + const struct exynos_media_pipeline_ops *ops; +}; + +static inline struct exynos_video_entity *vdev_to_exynos_video_entity( + struct video_device *vdev) +{ + return container_of(vdev, struct exynos_video_entity, vdev); +} + +#define fimc_pipeline_call(ent, op, args...) \ + (!(ent) ? -ENOENT : (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ + (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ + +#endif /* S5P_FIMC_H_ */ diff --git a/include/media/s5p_fimc.h b/include/media/s5p_fimc.h deleted file mode 100644 index b975c285c8a9..000000000000 --- a/include/media/s5p_fimc.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Samsung S5P/Exynos4 SoC series camera interface driver header - * - * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd. - * Sylwester Nawrocki - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef S5P_FIMC_H_ -#define S5P_FIMC_H_ - -#include -#include -#include - -/* - * Enumeration of data inputs to the camera subsystem. - */ -enum fimc_input { - FIMC_INPUT_PARALLEL_0 = 1, - FIMC_INPUT_PARALLEL_1, - FIMC_INPUT_MIPI_CSI2_0 = 3, - FIMC_INPUT_MIPI_CSI2_1, - FIMC_INPUT_WRITEBACK_A = 5, - FIMC_INPUT_WRITEBACK_B, - FIMC_INPUT_WRITEBACK_ISP = 5, -}; - -/* - * Enumeration of the FIMC data bus types. - */ -enum fimc_bus_type { - /* Camera parallel bus */ - FIMC_BUS_TYPE_ITU_601 = 1, - /* Camera parallel bus with embedded synchronization */ - FIMC_BUS_TYPE_ITU_656, - /* Camera MIPI-CSI2 serial bus */ - FIMC_BUS_TYPE_MIPI_CSI2, - /* FIFO link from LCD controller (WriteBack A) */ - FIMC_BUS_TYPE_LCD_WRITEBACK_A, - /* FIFO link from LCD controller (WriteBack B) */ - FIMC_BUS_TYPE_LCD_WRITEBACK_B, - /* FIFO link from FIMC-IS */ - FIMC_BUS_TYPE_ISP_WRITEBACK = FIMC_BUS_TYPE_LCD_WRITEBACK_B, -}; - -#define fimc_input_is_parallel(x) ((x) == 1 || (x) == 2) -#define fimc_input_is_mipi_csi(x) ((x) == 3 || (x) == 4) - -/* - * The subdevices' group IDs. - */ -#define GRP_ID_SENSOR (1 << 8) -#define GRP_ID_FIMC_IS_SENSOR (1 << 9) -#define GRP_ID_WRITEBACK (1 << 10) -#define GRP_ID_CSIS (1 << 11) -#define GRP_ID_FIMC (1 << 12) -#define GRP_ID_FLITE (1 << 13) -#define GRP_ID_FIMC_IS (1 << 14) - -struct i2c_board_info; - -/** - * struct fimc_source_info - video source description required for the host - * interface configuration - * - * @board_info: pointer to I2C subdevice's board info - * @clk_frequency: frequency of the clock the host interface provides to sensor - * @fimc_bus_type: FIMC camera input type - * @sensor_bus_type: image sensor bus type, MIPI, ITU-R BT.601 etc. 
- * @flags: the parallel sensor bus flags defining signals polarity (V4L2_MBUS_*) - * @i2c_bus_num: i2c control bus id the sensor is attached to - * @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU) - * @clk_id: index of the SoC peripheral clock for sensors - */ -struct fimc_source_info { - struct i2c_board_info *board_info; - unsigned long clk_frequency; - enum fimc_bus_type fimc_bus_type; - enum fimc_bus_type sensor_bus_type; - u16 flags; - u16 i2c_bus_num; - u16 mux_id; - u8 clk_id; -}; - -/** - * struct s5p_platform_fimc - camera host interface platform data - * - * @source_info: properties of an image source for the host interface setup - * @num_clients: the number of attached image sources - */ -struct s5p_platform_fimc { - struct fimc_source_info *source_info; - int num_clients; -}; - -/* - * v4l2_device notification id. This is only for internal use in the kernel. - * Sensor subdevs should issue S5P_FIMC_TX_END_NOTIFY notification in single - * frame capture mode when there is only one VSYNC pulse issued by the sensor - * at begining of the frame transmission. - */ -#define S5P_FIMC_TX_END_NOTIFY _IO('e', 0) - -#define FIMC_MAX_PLANES 3 - -/** - * struct fimc_fmt - color format data structure - * @mbus_code: media bus pixel code, -1 if not applicable - * @name: format description - * @fourcc: fourcc code for this format, 0 if not applicable - * @color: the driver's private color format id - * @memplanes: number of physically non-contiguous data planes - * @colplanes: number of physically contiguous data planes - * @colorspace: v4l2 colorspace (V4L2_COLORSPACE_*) - * @depth: per plane driver's private 'number of bits per pixel' - * @mdataplanes: bitmask indicating meta data plane(s), (1 << plane_no) - * @flags: flags indicating which operation mode format applies to - */ -struct fimc_fmt { - enum v4l2_mbus_pixelcode mbus_code; - char *name; - u32 fourcc; - u32 color; - u16 memplanes; - u16 colplanes; - u8 colorspace; - u8 depth[FIMC_MAX_PLANES]; - u16 mdataplanes; - u16 flags; -#define FMT_FLAGS_CAM (1 << 0) -#define FMT_FLAGS_M2M_IN (1 << 1) -#define FMT_FLAGS_M2M_OUT (1 << 2) -#define FMT_FLAGS_M2M (1 << 1 | 1 << 2) -#define FMT_HAS_ALPHA (1 << 3) -#define FMT_FLAGS_COMPRESSED (1 << 4) -#define FMT_FLAGS_WRITEBACK (1 << 5) -#define FMT_FLAGS_RAW_BAYER (1 << 6) -#define FMT_FLAGS_YUV (1 << 7) -}; - -struct exynos_media_pipeline; - -/* - * Media pipeline operations to be called from within a video node, i.e. the - * last entity within the pipeline. Implemented by related media device driver. - */ -struct exynos_media_pipeline_ops { - int (*prepare)(struct exynos_media_pipeline *p, - struct media_entity *me); - int (*unprepare)(struct exynos_media_pipeline *p); - int (*open)(struct exynos_media_pipeline *p, struct media_entity *me, - bool resume); - int (*close)(struct exynos_media_pipeline *p); - int (*set_stream)(struct exynos_media_pipeline *p, bool state); -}; - -struct exynos_video_entity { - struct video_device vdev; - struct exynos_media_pipeline *pipe; -}; - -struct exynos_media_pipeline { - struct media_pipeline mp; - const struct exynos_media_pipeline_ops *ops; -}; - -static inline struct exynos_video_entity *vdev_to_exynos_video_entity( - struct video_device *vdev) -{ - return container_of(vdev, struct exynos_video_entity, vdev); -} - -#define fimc_pipeline_call(ent, op, args...) \ - (!(ent) ? -ENOENT : (((ent)->pipe->ops && (ent)->pipe->ops->op) ? 
\ - (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ - -#endif /* S5P_FIMC_H_ */ -- cgit v1.2.3 From 8cd84092d35e52372da2c3c3c2afb1a719917af2 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Fri, 9 May 2014 16:43:08 +0900 Subject: PM / devfreq: Add resource-managed function for devfreq device This patch adds the following resource-managed functions for devfreq devices. devm_devfreq_add_device() automatically manages the memory of the devfreq device using device resource management. - devm_devfreq_add_device() - devm_devfreq_remove_device() Signed-off-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/devfreq.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/devfreq.h | 21 +++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index af4af7708574..8b6295d9d1f5 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -544,6 +544,69 @@ int devfreq_remove_device(struct devfreq *devfreq) } EXPORT_SYMBOL(devfreq_remove_device); +static int devm_devfreq_dev_match(struct device *dev, void *res, void *data) +{ + struct devfreq **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; +} + +static void devm_devfreq_dev_release(struct device *dev, void *res) +{ + devfreq_remove_device(*(struct devfreq **)res); +} + +/** + * devm_devfreq_add_device() - Resource-managed devfreq_add_device() + * @dev: the device to add devfreq feature. + * @profile: device-specific profile to run devfreq. + * @governor_name: name of the policy to choose frequency. + * @data: private data for the governor. The devfreq framework does not + * touch this value. + * + * This function manages automatically the memory of devfreq device using device + * resource management and simplify the free operation for memory of devfreq + * device. + */ +struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) +{ + struct devfreq **ptr, *devfreq; + + ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + devfreq = devfreq_add_device(dev, profile, governor_name, data); + if (IS_ERR(devfreq)) { + devres_free(ptr); + return ERR_PTR(-ENOMEM); + } + + *ptr = devfreq; + devres_add(dev, ptr); + + return devfreq; +} +EXPORT_SYMBOL(devm_devfreq_add_device); + +/** + * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device() + * @dev: the device to add devfreq feature. + * @devfreq: the devfreq instance to be removed + */ +void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq) +{ + WARN_ON(devres_release(dev, devm_devfreq_dev_release, + devm_devfreq_dev_match, devfreq)); +} +EXPORT_SYMBOL(devm_devfreq_remove_device); + /** * devfreq_suspend_device() - Suspend devfreq of a device.
* @devfreq: the devfreq instance to be suspended diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index d48dc00232a4..023d668a2cb5 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -181,6 +181,12 @@ extern struct devfreq *devfreq_add_device(struct device *dev, const char *governor_name, void *data); extern int devfreq_remove_device(struct devfreq *devfreq); +extern struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +extern void devm_devfreq_remove_device(struct device *dev, + struct devfreq *devfreq); /* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */ extern int devfreq_suspend_device(struct devfreq *devfreq); @@ -220,7 +226,7 @@ static inline struct devfreq *devfreq_add_device(struct device *dev, const char *governor_name, void *data) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline int devfreq_remove_device(struct devfreq *devfreq) @@ -228,6 +234,19 @@ static inline int devfreq_remove_device(struct devfreq *devfreq) return 0; } +static inline struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_devfreq_remove_device(struct device *dev, + struct devfreq *devfreq) +{ +} + static inline int devfreq_suspend_device(struct devfreq *devfreq) { return 0; -- cgit v1.2.3 From d5b040d0cab9cae1dc1ad61a07019062235f4878 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Fri, 9 May 2014 16:43:09 +0900 Subject: PM / devfreq: Add devm_devfreq_{register,unregister}_opp_notifier function This patch adds the following resource-managed functions for devfreq OPP notifiers. devm_devfreq_register_opp_notifier() automatically manages the registration of the devfreq OPP notifier using device resource management. - devm_devfreq_register_opp_notifier() - devm_devfreq_unregister_opp_notifier() Signed-off-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/devfreq.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/devfreq.h | 14 ++++++++++++++ 2 files changed, 62 insertions(+) (limited to 'include/linux') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 8b6295d9d1f5..9f90369dd6bd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1169,6 +1169,54 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) return ret; } +static void devm_devfreq_opp_release(struct device *dev, void *res) +{ + devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res); +} + +/** + * devm_ devfreq_register_opp_notifier() + * - Resource-managed devfreq_register_opp_notifier() + * @dev: The devfreq user device. (parent of devfreq) + * @devfreq: The devfreq object. + */ +int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + struct devfreq **ptr; + int ret; + + ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = devfreq_register_opp_notifier(dev, devfreq); + if (ret) { + devres_free(ptr); + return ret; + } + + *ptr = devfreq; + devres_add(dev, ptr); + + return 0; +} +EXPORT_SYMBOL(devm_devfreq_register_opp_notifier); + +/** + * devm_devfreq_unregister_opp_notifier() + * - Resource-managed devfreq_unregister_opp_notifier() + * @dev: The devfreq user device. (parent of devfreq) + * @devfreq: The devfreq object.
+ */ +void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + WARN_ON(devres_release(dev, devm_devfreq_opp_release, + devm_devfreq_dev_match, devfreq)); +} +EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier); + MODULE_AUTHOR("MyungJoo Ham "); MODULE_DESCRIPTION("devfreq class support"); MODULE_LICENSE("GPL"); diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 023d668a2cb5..f1863dcd83ea 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -199,6 +199,10 @@ extern int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +extern int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** @@ -275,6 +279,16 @@ static inline int devfreq_unregister_opp_notifier(struct device *dev, return -EINVAL; } +static inline int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ +} #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ -- cgit v1.2.3 From 72e6ae285a1dbff553734985bedadf409d99c02d Mon Sep 17 00:00:00 2001 From: Victor Kamensky Date: Tue, 29 Apr 2014 04:20:52 +0100 Subject: ARM: 8043/1: uprobes need icache flush after xol write After an instruction is written into the xol area, on the ARM v7 architecture the code needs to flush the dcache and icache to sync them up for the given set of addresses. Having just a 'flush_dcache_page(page)' call is not enough - it is possible to have a stale instruction sitting in the icache for the given xol area slot address. Introduce the weak function arch_uprobe_copy_ixol, which by default calls the uprobes copy_to_page function and then flush_dcache_page, and define an ARM-specific version that handles the xol slot copy in an ARM-specific way. The flush_uprobe_xol_access function shares/reuses the implementation of the flush_ptrace_access function and takes care of writing the instruction to user-land address space on the variety of different cache types found on ARM CPUs. Because flush_uprobe_xol_access does not have a vma around, flush_ptrace_access was split into two parts: one that retrieves the set of conditions from the vma, and a common part that receives those conditions as flags. Note that the ARM cache flush function needs the kernel address through which the instruction write happened, so instead of using the uprobes copy_to_page function the code was changed to explicitly map the page and do a memcpy. Note that the arch_uprobe_copy_ixol function, in a similar way to the copy_to_user_page function, has preempt_disable/preempt_enable. Signed-off-by: Victor Kamensky Acked-by: Oleg Nesterov Reviewed-by: David A.
Long Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 2 ++ arch/arm/kernel/uprobes.c | 20 ++++++++++++++++++++ arch/arm/mm/flush.c | 33 ++++++++++++++++++++++++++++----- include/linux/uprobes.h | 3 +++ kernel/events/uprobes.c | 25 +++++++++++++++++-------- 5 files changed, 70 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 00af9fe435e6..fd43f7f55b70 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, + void *kaddr, unsigned long len); #endif diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c index f9bacee973bf..56adf9c1fde0 100644 --- a/arch/arm/kernel/uprobes.c +++ b/arch/arm/kernel/uprobes.c @@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, return 0; } +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + void *xol_page_kaddr = kmap_atomic(page); + void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); + + preempt_disable(); + + /* Initialize the slot */ + memcpy(dst, src, len); + + /* flush caches (dcache/icache) */ + flush_uprobe_xol_access(page, vaddr, dst, len); + + preempt_enable(); + + kunmap_atomic(xol_page_kaddr); +} + + int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 3387e60e4ea3..43d54f5b26b9 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig #define flush_icache_alias(pfn,vaddr,len) do { } while (0) #endif +#define FLAG_PA_IS_EXEC 1 +#define FLAG_PA_CORE_IN_MM 2 + static void flush_ptrace_access_other(void *args) { __flush_icache_all(); } -static -void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, unsigned long len) +static inline +void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, + unsigned long len, unsigned int flags) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + if (flags & FLAG_PA_CORE_IN_MM) { unsigned long addr = (unsigned long)kaddr; __cpuc_coherent_kern_range(addr, addr + len); } @@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing D-cache */ - if (vma->vm_flags & VM_EXEC) { + if (flags & FLAG_PA_IS_EXEC) { unsigned long addr = (unsigned long)kaddr; if (icache_is_vipt_aliasing()) flush_icache_alias(page_to_pfn(page), uaddr, len); @@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } } +static +void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, unsigned long len) +{ + unsigned int flags = 0; + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + flags |= FLAG_PA_CORE_IN_MM; + if (vma->vm_flags & VM_EXEC) + flags |= FLAG_PA_IS_EXEC; + __flush_ptrace_access(page, uaddr, kaddr, len, flags); +} + +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, + void *kaddr, unsigned long len) +{ + unsigned int 
flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC; + + __flush_ptrace_access(page, uaddr, kaddr, len, flags); +} + /* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index edff2b97b864..c52f827ba6ce 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -32,6 +32,7 @@ struct vm_area_struct; struct mm_struct; struct inode; struct notifier_block; +struct page; #define UPROBE_HANDLER_REMOVE 1 #define UPROBE_HANDLER_MASK 1 @@ -127,6 +128,8 @@ extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned l extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); +extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 04709b66369d..4968213c63fa 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1296,14 +1296,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) if (unlikely(!xol_vaddr)) return 0; - /* Initialize the slot */ - copy_to_page(area->page, xol_vaddr, - &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); - /* - * We probably need flush_icache_user_range() but it needs vma. - * This should work on supported architectures too. - */ - flush_dcache_page(area->page); + arch_uprobe_copy_ixol(area->page, xol_vaddr, + &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); return xol_vaddr; } @@ -1346,6 +1340,21 @@ static void xol_free_insn_slot(struct task_struct *tsk) } } +void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + /* Initialize the slot */ + copy_to_page(page, vaddr, src, len); + + /* + * We probably need flush_icache_user_range() but it needs vma. + * This should work on most of architectures by default. If + * architecture needs to do something different it can define + * its own version of the function. 
+ */ + flush_dcache_page(page); +} + /** * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs * @regs: Reflects the saved state of the task after it has hit a breakpoint -- cgit v1.2.3 From d25a2a16f0889de4a1cd8639896f35dc9465f6f5 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 2 Apr 2014 12:47:37 +0200 Subject: iommu: Add driver for Renesas VMSA-compatible IPMMU Signed-off-by: Laurent Pinchart Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 12 + drivers/iommu/Makefile | 1 + drivers/iommu/ipmmu-vmsa.c | 1070 ++++++++++++++++++++++++++++++ include/linux/platform_data/ipmmu-vmsa.h | 24 + 4 files changed, 1107 insertions(+) create mode 100644 drivers/iommu/ipmmu-vmsa.c create mode 100644 include/linux/platform_data/ipmmu-vmsa.h (limited to 'include/linux') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index df56e4c74a7e..a22b537caacd 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB +config IPMMU_VMSA + bool "Renesas VMSA-compatible IPMMU" + depends on ARM_LPAE + depends on ARCH_SHMOBILE || COMPILE_TEST + select IOMMU_API + select ARM_DMA_USE_IOMMU + help + Support for the Renesas VMSA-compatible IPMMU Renesas found in the + R-Mobile APE6 and R-Car H2/M2 SoCs. + + If unsure, say N. + config SPAPR_TCE_IOMMU bool "sPAPR TCE IOMMU Support" depends on PPC_POWERNV || PPC_PSERIES diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 5d58bf16e9e3..8893bad048e0 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o +obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c new file mode 100644 index 000000000000..b084530babf4 --- /dev/null +++ b/drivers/iommu/ipmmu-vmsa.c @@ -0,0 +1,1070 @@ +/* + * IPMMU VMSA + * + * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ipmmu_vmsa_device { + struct device *dev; + void __iomem *base; + struct list_head list; + + const struct ipmmu_vmsa_platform_data *pdata; + unsigned int num_utlbs; + + struct dma_iommu_mapping *mapping; +}; + +struct ipmmu_vmsa_domain { + struct ipmmu_vmsa_device *mmu; + struct iommu_domain *io_domain; + + unsigned int context_id; + spinlock_t lock; /* Protects mappings */ + pgd_t *pgd; +}; + +static DEFINE_SPINLOCK(ipmmu_devices_lock); +static LIST_HEAD(ipmmu_devices); + +#define TLB_LOOP_TIMEOUT 100 /* 100us */ + +/* ----------------------------------------------------------------------------- + * Registers Definition + */ + +#define IM_CTX_SIZE 0x40 + +#define IMCTR 0x0000 +#define IMCTR_TRE (1 << 17) +#define IMCTR_AFE (1 << 16) +#define IMCTR_RTSEL_MASK (3 << 4) +#define IMCTR_RTSEL_SHIFT 4 +#define IMCTR_TREN (1 << 3) +#define IMCTR_INTEN (1 << 2) +#define IMCTR_FLUSH (1 << 1) +#define IMCTR_MMUEN (1 << 0) + +#define IMCAAR 0x0004 + +#define IMTTBCR 0x0008 +#define IMTTBCR_EAE (1 << 31) +#define IMTTBCR_PMB (1 << 30) +#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) +#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) +#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) +#define IMTTBCR_SH1_MASK (3 << 28) +#define IMTTBCR_ORGN1_NC (0 << 26) +#define IMTTBCR_ORGN1_WB_WA (1 << 26) +#define IMTTBCR_ORGN1_WT (2 << 26) +#define IMTTBCR_ORGN1_WB (3 << 26) +#define IMTTBCR_ORGN1_MASK (3 << 26) +#define IMTTBCR_IRGN1_NC (0 << 24) +#define IMTTBCR_IRGN1_WB_WA (1 << 24) +#define IMTTBCR_IRGN1_WT (2 << 24) +#define IMTTBCR_IRGN1_WB (3 << 24) +#define IMTTBCR_IRGN1_MASK (3 << 24) +#define IMTTBCR_TSZ1_MASK (7 << 16) +#define IMTTBCR_TSZ1_SHIFT 16 +#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) +#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) +#define IMTTBCR_SH0_MASK (3 << 12) +#define IMTTBCR_ORGN0_NC (0 << 10) +#define IMTTBCR_ORGN0_WB_WA (1 << 10) +#define IMTTBCR_ORGN0_WT (2 << 10) +#define IMTTBCR_ORGN0_WB (3 << 10) +#define IMTTBCR_ORGN0_MASK (3 << 10) +#define IMTTBCR_IRGN0_NC (0 << 8) +#define IMTTBCR_IRGN0_WB_WA (1 << 8) +#define IMTTBCR_IRGN0_WT (2 << 8) +#define IMTTBCR_IRGN0_WB (3 << 8) +#define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SL0_LVL_2 (0 << 4) +#define IMTTBCR_SL0_LVL_1 (1 << 4) +#define IMTTBCR_TSZ0_MASK (7 << 0) +#define IMTTBCR_TSZ0_SHIFT O + +#define IMBUSCR 0x000c +#define IMBUSCR_DVM (1 << 2) +#define IMBUSCR_BUSSEL_SYS (0 << 0) +#define IMBUSCR_BUSSEL_CCI (1 << 0) +#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) +#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) +#define IMBUSCR_BUSSEL_MASK (3 << 0) + +#define IMTTLBR0 0x0010 +#define IMTTUBR0 0x0014 +#define IMTTLBR1 0x0018 +#define IMTTUBR1 0x001c + +#define IMSTR 0x0020 +#define IMSTR_ERRLVL_MASK (3 << 12) +#define IMSTR_ERRLVL_SHIFT 12 +#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) +#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) +#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) +#define IMSTR_ERRCODE_MASK (7 << 8) +#define IMSTR_MHIT (1 << 4) +#define IMSTR_ABORT (1 << 2) +#define IMSTR_PF (1 << 1) +#define IMSTR_TF (1 << 0) + +#define IMMAIR0 0x0028 +#define IMMAIR1 0x002c +#define IMMAIR_ATTR_MASK 0xff +#define IMMAIR_ATTR_DEVICE 0x04 +#define IMMAIR_ATTR_NC 0x44 +#define IMMAIR_ATTR_WBRWA 0xff +#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) +#define IMMAIR_ATTR_IDX_NC 0 +#define IMMAIR_ATTR_IDX_WBRWA 1 +#define IMMAIR_ATTR_IDX_DEV 2 + +#define IMEAR 0x0030 + 
+#define IMPCTR 0x0200 +#define IMPSTR 0x0208 +#define IMPEAR 0x020c +#define IMPMBA(n) (0x0280 + ((n) * 4)) +#define IMPMBD(n) (0x02c0 + ((n) * 4)) + +#define IMUCTR(n) (0x0300 + ((n) * 16)) +#define IMUCTR_FIXADDEN (1 << 31) +#define IMUCTR_FIXADD_MASK (0xff << 16) +#define IMUCTR_FIXADD_SHIFT 16 +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) +#define IMUCTR_TTSEL_PMB (8 << 4) +#define IMUCTR_TTSEL_MASK (15 << 4) +#define IMUCTR_FLUSH (1 << 1) +#define IMUCTR_MMUEN (1 << 0) + +#define IMUASID(n) (0x0308 + ((n) * 16)) +#define IMUASID_ASID8_MASK (0xff << 8) +#define IMUASID_ASID8_SHIFT 8 +#define IMUASID_ASID0_MASK (0xff << 0) +#define IMUASID_ASID0_SHIFT 0 + +/* ----------------------------------------------------------------------------- + * Page Table Bits + */ + +/* + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access, + * Long-descriptor format" that the NStable bit being set in a table descriptor + * will result in the NStable and NS bits of all child entries being ignored and + * considered as being set. The IPMMU seems not to comply with this, as it + * generates a secure access page fault if any of the NStable and NS bits isn't + * set when running in non-secure mode. + */ +#ifndef PMD_NSTABLE +#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63) +#endif + +#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53) +#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52) +#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10) +#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8) +#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8) +#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8) +#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5) +#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0) + +/* Stage-1 PTE */ +#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6) +#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6) +#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2 +#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11) + +/* Stage-2 PTE */ +#define ARM_VMSA_PTE_HAP_FAULT (((pteval_t)0) << 6) +#define ARM_VMSA_PTE_HAP_READ (((pteval_t)1) << 6) +#define ARM_VMSA_PTE_HAP_WRITE (((pteval_t)2) << 6) +#define ARM_VMSA_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) +#define ARM_VMSA_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) +#define ARM_VMSA_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) + +/* ----------------------------------------------------------------------------- + * Read/Write Access + */ + +static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) +{ + return ioread32(mmu->base + offset); +} + +static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, + u32 data) +{ + iowrite32(data, mmu->base + offset); +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) +{ + return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); +} + +static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, + u32 data) +{ + ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); +} + +/* ----------------------------------------------------------------------------- + * TLB and microTLB Management + */ + +/* Wait for any pending TLB invalidations to complete */ +static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) +{ + unsigned int count = 0; + + while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { + cpu_relax(); + if (++count == TLB_LOOP_TIMEOUT) { + dev_err_ratelimited(domain->mmu->dev, + "TLB sync timed out -- MMU may be deadlocked\n"); + return; + } + udelay(1); + } +} + +static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) +{ + u32 reg; + + reg = 
ipmmu_ctx_read(domain, IMCTR); + reg |= IMCTR_FLUSH; + ipmmu_ctx_write(domain, IMCTR, reg); + + ipmmu_tlb_sync(domain); +} + +/* + * Enable MMU translation for the microTLB. + */ +static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, + const struct ipmmu_vmsa_master *master) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + /* TODO: What should we set the ASID to ? */ + ipmmu_write(mmu, IMUASID(master->utlb), 0); + /* TODO: Do we need to flush the microTLB ? */ + ipmmu_write(mmu, IMUCTR(master->utlb), + IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | + IMUCTR_MMUEN); +} + +/* + * Disable MMU translation for the microTLB. + */ +static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, + const struct ipmmu_vmsa_master *master) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + ipmmu_write(mmu, IMUCTR(master->utlb), 0); +} + +static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, + size_t size) +{ + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. + */ + dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE); +} + +/* ----------------------------------------------------------------------------- + * Domain/Context Management + */ + +static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +{ + phys_addr_t ttbr; + u32 reg; + + /* + * TODO: When adding support for multiple contexts, find an unused + * context. + */ + domain->context_id = 0; + + /* TTBR0 */ + ipmmu_flush_pgtable(domain->mmu, domain->pgd, + PTRS_PER_PGD * sizeof(*domain->pgd)); + ttbr = __pa(domain->pgd); + ipmmu_ctx_write(domain, IMTTLBR0, ttbr); + ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); + + /* + * TTBCR + * We use long descriptors with inner-shareable WBWA tables and allocate + * the whole 32-bit VA space to TTBR0. + */ + ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | + IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); + + /* + * MAIR0 + * We need three attributes only, non-cacheable, write-back read/write + * allocate and device memory. + */ + reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC)) + | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA)) + | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV)); + ipmmu_ctx_write(domain, IMMAIR0, reg); + + /* IMBUSCR */ + ipmmu_ctx_write(domain, IMBUSCR, + ipmmu_ctx_read(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + + /* + * IMSTR + * Clear all interrupt flags. + */ + ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); + + /* + * IMCTR + * Enable the MMU and interrupt generation. The long-descriptor + * translation table format doesn't use TEX remapping. Don't enable AF + * software management as we have no use for it. Flush the TLB as + * required when modifying the context registers. + */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); + + return 0; +} + +static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) +{ + /* + * Disable the context. Flush the TLB as required when modifying the + * context registers. + * + * TODO: Is TLB flush really needed ? 
+ */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); + ipmmu_tlb_sync(domain); +} + +/* ----------------------------------------------------------------------------- + * Fault Handling + */ + +static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) +{ + const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; + struct ipmmu_vmsa_device *mmu = domain->mmu; + u32 status; + u32 iova; + + status = ipmmu_ctx_read(domain, IMSTR); + if (!(status & err_mask)) + return IRQ_NONE; + + iova = ipmmu_ctx_read(domain, IMEAR); + + /* + * Clear the error status flags. Unlike traditional interrupt flag + * registers that must be cleared by writing 1, this status register + * seems to require 0. The error address register must be read before, + * otherwise its value will be 0. + */ + ipmmu_ctx_write(domain, IMSTR, 0); + + /* Log fatal errors. */ + if (status & IMSTR_MHIT) + dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", + iova); + if (status & IMSTR_ABORT) + dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", + iova); + + if (!(status & (IMSTR_PF | IMSTR_TF))) + return IRQ_NONE; + + /* + * Try to handle page faults and translation faults. + * + * TODO: We need to look up the faulty device based on the I/O VA. Use + * the IOMMU device for now. + */ + if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0)) + return IRQ_HANDLED; + + dev_err_ratelimited(mmu->dev, + "Unhandled fault: status 0x%08x iova 0x%08x\n", + status, iova); + + return IRQ_HANDLED; +} + +static irqreturn_t ipmmu_irq(int irq, void *dev) +{ + struct ipmmu_vmsa_device *mmu = dev; + struct iommu_domain *io_domain; + struct ipmmu_vmsa_domain *domain; + + if (!mmu->mapping) + return IRQ_NONE; + + io_domain = mmu->mapping->domain; + domain = io_domain->priv; + + return ipmmu_domain_irq(domain); +} + +/* ----------------------------------------------------------------------------- + * Page Table Management + */ + +static void ipmmu_free_ptes(pmd_t *pmd) +{ + pgtable_t table = pmd_pgtable(*pmd); + __free_page(table); +} + +static void ipmmu_free_pmds(pud_t *pud) +{ + pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); + unsigned int i; + + pmd = pmd_base; + for (i = 0; i < PTRS_PER_PMD; ++i) { + if (pmd_none(*pmd)) + continue; + + ipmmu_free_ptes(pmd); + pmd++; + } + + pmd_free(NULL, pmd_base); +} + +static void ipmmu_free_puds(pgd_t *pgd) +{ + pud_t *pud, *pud_base = pud_offset(pgd, 0); + unsigned int i; + + pud = pud_base; + for (i = 0; i < PTRS_PER_PUD; ++i) { + if (pud_none(*pud)) + continue; + + ipmmu_free_pmds(pud); + pud++; + } + + pud_free(NULL, pud_base); +} + +static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain) +{ + pgd_t *pgd, *pgd_base = domain->pgd; + unsigned int i; + + /* + * Recursively free the page tables for this domain. We don't care about + * speculative TLB filling, because the TLB will be nuked next time this + * context bank is re-allocated and no devices currently map to these + * tables. + */ + pgd = pgd_base; + for (i = 0; i < PTRS_PER_PGD; ++i) { + if (pgd_none(*pgd)) + continue; + ipmmu_free_puds(pgd); + pgd++; + } + + kfree(pgd_base); +} + +/* + * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte) + * functions as they would flush the CPU TLB. 
+ */ + +static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long addr, unsigned long end, + phys_addr_t phys, int prot) +{ + unsigned long pfn = __phys_to_pfn(phys); + pteval_t pteval = ARM_VMSA_PTE_PAGE | ARM_VMSA_PTE_NS | ARM_VMSA_PTE_AF + | ARM_VMSA_PTE_XN; + pte_t *pte, *start; + + if (pmd_none(*pmd)) { + /* Allocate a new set of tables */ + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + if (!pte) + return -ENOMEM; + + ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE); + *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + pte += pte_index(addr); + } else + pte = pte_offset_kernel(pmd, addr); + + pteval |= ARM_VMSA_PTE_AP_UNPRIV | ARM_VMSA_PTE_nG; + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) + pteval |= ARM_VMSA_PTE_AP_RDONLY; + + if (prot & IOMMU_CACHE) + pteval |= (IMMAIR_ATTR_IDX_WBRWA << + ARM_VMSA_PTE_ATTRINDX_SHIFT); + + /* If no access, create a faulting entry to avoid TLB fills */ + if (prot & IOMMU_EXEC) + pteval &= ~ARM_VMSA_PTE_XN; + else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + pteval &= ~ARM_VMSA_PTE_PAGE; + + pteval |= ARM_VMSA_PTE_SH_IS; + start = pte; + + /* Install the page table entries. */ + do { + *pte++ = pfn_pte(pfn++, __pgprot(pteval)); + addr += PAGE_SIZE; + } while (addr != end); + + ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * (pte - start)); + return 0; +} + +static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud, + unsigned long addr, unsigned long end, + phys_addr_t phys, int prot) +{ + unsigned long next; + pmd_t *pmd; + int ret; + +#ifndef __PAGETABLE_PMD_FOLDED + if (pud_none(*pud)) { + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); + if (!pmd) + return -ENOMEM; + + ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE); + *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); + + pmd += pmd_index(addr); + } else +#endif + pmd = pmd_offset(pud, addr); + + do { + next = pmd_addr_end(addr, end); + ret = ipmmu_alloc_init_pte(mmu, pmd, addr, end, phys, prot); + phys += next - addr; + } while (pmd++, addr = next, addr < end); + + return ret; +} + +static int ipmmu_alloc_init_pud(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, + unsigned long addr, unsigned long end, + phys_addr_t phys, int prot) +{ + unsigned long next; + pud_t *pud; + int ret; + +#ifndef __PAGETABLE_PUD_FOLDED + if (pgd_none(*pgd)) { + pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); + if (!pud) + return -ENOMEM; + + ipmmu_flush_pgtable(mmu, pud, PAGE_SIZE); + *pgd = __pgd(__pa(pud) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pgd, sizeof(*pgd)); + + pud += pud_index(addr); + } else +#endif + pud = pud_offset(pgd, addr); + + do { + next = pud_addr_end(addr, end); + ret = ipmmu_alloc_init_pmd(mmu, pud, addr, next, phys, prot); + phys += next - addr; + } while (pud++, addr = next, addr < end); + + return ret; +} + +static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + pgd_t *pgd = domain->pgd; + unsigned long flags; + unsigned long end; + int ret; + + if (!pgd) + return -EINVAL; + + if (size & ~PAGE_MASK) + return -EINVAL; + + if (paddr & ~((1ULL << 40) - 1)) + return -ERANGE; + + spin_lock_irqsave(&domain->lock, flags); + + pgd += pgd_index(iova); + end = iova + size; + + do { + unsigned long next = pgd_addr_end(iova, end); + + ret = ipmmu_alloc_init_pud(mmu, pgd, iova, next, paddr, prot); + if (ret) + break; + + paddr += 
next - iova; + iova = next; + } while (pgd++, iova != end); + + spin_unlock_irqrestore(&domain->lock, flags); + + ipmmu_tlb_invalidate(domain); + + return ret; +} + +/* ----------------------------------------------------------------------------- + * IOMMU Operations + */ + +static const struct ipmmu_vmsa_master * +ipmmu_find_master(struct ipmmu_vmsa_device *ipmmu, struct device *dev) +{ + const struct ipmmu_vmsa_master *master = ipmmu->pdata->masters; + const char *devname = dev_name(dev); + unsigned int i; + + for (i = 0; i < ipmmu->pdata->num_masters; ++i, ++master) { + if (strcmp(master->name, devname) == 0) + return master; + } + + return NULL; +} + +static int ipmmu_domain_init(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return -ENOMEM; + + spin_lock_init(&domain->lock); + + domain->pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + if (!domain->pgd) { + kfree(domain); + return -ENOMEM; + } + + io_domain->priv = domain; + domain->io_domain = io_domain; + + return 0; +} + +static void ipmmu_domain_destroy(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + /* + * Free the domain resources. We assume that all devices have already + * been detached. + */ + ipmmu_domain_destroy_context(domain); + ipmmu_free_pgtables(domain); + kfree(domain); +} + +static int ipmmu_attach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_device *mmu = dev->archdata.iommu; + struct ipmmu_vmsa_domain *domain = io_domain->priv; + const struct ipmmu_vmsa_master *master; + unsigned long flags; + int ret = 0; + + if (!mmu) { + dev_err(dev, "Cannot attach to IPMMU\n"); + return -ENXIO; + } + + spin_lock_irqsave(&domain->lock, flags); + + if (!domain->mmu) { + /* The domain hasn't been used yet, initialize it. */ + domain->mmu = mmu; + ret = ipmmu_domain_init_context(domain); + } else if (domain->mmu != mmu) { + /* + * Something is wrong, we can't attach two devices using + * different IOMMUs to the same domain. + */ + dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", + dev_name(mmu->dev), dev_name(domain->mmu->dev)); + ret = -EINVAL; + } + + spin_unlock_irqrestore(&domain->lock, flags); + + if (ret < 0) + return ret; + + master = ipmmu_find_master(mmu, dev); + if (!master) + return -EINVAL; + + ipmmu_utlb_enable(domain, master); + + return 0; +} + +static void ipmmu_detach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + const struct ipmmu_vmsa_master *master; + + master = ipmmu_find_master(domain->mmu, dev); + if (!master) + return; + + ipmmu_utlb_disable(domain, master); + + /* + * TODO: Optimize by disabling the context when no device is attached. + */ +} + +static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + if (!domain) + return -ENODEV; + + return ipmmu_handle_mapping(domain, iova, paddr, size, prot); +} + +static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, + size_t size) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + int ret; + + ret = ipmmu_handle_mapping(domain, iova, 0, size, 0); + return ret ? 
0 : size; +} + +static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, + dma_addr_t iova) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + pgd_t pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; + + /* TODO: Is locking needed ? */ + + if (!domain->pgd) + return 0; + + pgd = *(domain->pgd + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; + + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; + + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; + + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); + if (pte_none(pte)) + return 0; + + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); +} + +static int ipmmu_add_device(struct device *dev) +{ + const struct ipmmu_vmsa_master *master = NULL; + struct ipmmu_vmsa_device *mmu; + struct iommu_group *group; + int ret; + + if (dev->archdata.iommu) { + dev_warn(dev, "IOMMU driver already assigned to device %s\n", + dev_name(dev)); + return -EINVAL; + } + + /* Find the master corresponding to the device. */ + spin_lock(&ipmmu_devices_lock); + + list_for_each_entry(mmu, &ipmmu_devices, list) { + master = ipmmu_find_master(mmu, dev); + if (master) { + /* + * TODO Take a reference to the master to protect + * against device removal. + */ + break; + } + } + + spin_unlock(&ipmmu_devices_lock); + + if (!master) + return -ENODEV; + + if (!master->utlb >= mmu->num_utlbs) + return -EINVAL; + + /* Create a device group and add the device to it. */ + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + + ret = iommu_group_add_device(group, dev); + iommu_group_put(group); + + if (ret < 0) { + dev_err(dev, "Failed to add device to IPMMU group\n"); + return ret; + } + + dev->archdata.iommu = mmu; + + /* + * Create the ARM mapping, used by the ARM DMA mapping core to allocate + * VAs. This will allocate a corresponding IOMMU domain. + * + * TODO: + * - Create one mapping per context (TLB). + * - Make the mapping size configurable ? We currently use a 2GB mapping + * at a 1GB offset to ensure that NULL VAs will fault. + */ + if (!mmu->mapping) { + struct dma_iommu_mapping *mapping; + + mapping = arm_iommu_create_mapping(&platform_bus_type, + SZ_1G, SZ_2G, 0); + if (IS_ERR(mapping)) { + dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); + return PTR_ERR(mapping); + } + + mmu->mapping = mapping; + } + + /* Attach the ARM VA mapping to the device. */ + ret = arm_iommu_attach_device(dev, mmu->mapping); + if (ret < 0) { + dev_err(dev, "Failed to attach device to VA mapping\n"); + goto error; + } + + return 0; + +error: + dev->archdata.iommu = NULL; + iommu_group_remove_device(dev); + return ret; +} + +static void ipmmu_remove_device(struct device *dev) +{ + arm_iommu_detach_device(dev); + iommu_group_remove_device(dev); + dev->archdata.iommu = NULL; +} + +static struct iommu_ops ipmmu_ops = { + .domain_init = ipmmu_domain_init, + .domain_destroy = ipmmu_domain_destroy, + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device, + .remove_device = ipmmu_remove_device, + .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, +}; + +/* ----------------------------------------------------------------------------- + * Probe/remove and init + */ + +static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) +{ + unsigned int i; + + /* Disable all contexts. 
*/ + for (i = 0; i < 4; ++i) + ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); +} + +static int ipmmu_probe(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu; + struct resource *res; + int irq; + int ret; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "missing platform data\n"); + return -EINVAL; + } + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) { + dev_err(&pdev->dev, "cannot allocate device data\n"); + return -ENOMEM; + } + + mmu->dev = &pdev->dev; + mmu->pdata = pdev->dev.platform_data; + mmu->num_utlbs = 32; + + /* Map I/O memory and request IRQ. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mmu->base)) + return PTR_ERR(mmu->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "no IRQ found\n"); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, + dev_name(&pdev->dev), mmu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); + return irq; + } + + ipmmu_device_reset(mmu); + + /* + * We can't create the ARM mapping here as it requires the bus to have + * an IOMMU, which only happens when bus_set_iommu() is called in + * ipmmu_init() after the probe function returns. + */ + + spin_lock(&ipmmu_devices_lock); + list_add(&mmu->list, &ipmmu_devices); + spin_unlock(&ipmmu_devices_lock); + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int ipmmu_remove(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); + + spin_lock(&ipmmu_devices_lock); + list_del(&mmu->list); + spin_unlock(&ipmmu_devices_lock); + + arm_iommu_release_mapping(mmu->mapping); + + ipmmu_device_reset(mmu); + + return 0; +} + +static struct platform_driver ipmmu_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "ipmmu-vmsa", + }, + .probe = ipmmu_probe, + .remove = ipmmu_remove, +}; + +static int __init ipmmu_init(void) +{ + int ret; + + ret = platform_driver_register(&ipmmu_driver); + if (ret < 0) + return ret; + + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &ipmmu_ops); + + return 0; +} + +static void __exit ipmmu_exit(void) +{ + return platform_driver_unregister(&ipmmu_driver); +} + +subsys_initcall(ipmmu_init); +module_exit(ipmmu_exit); + +MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); +MODULE_AUTHOR("Laurent Pinchart "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h new file mode 100644 index 000000000000..5275b3ac6d37 --- /dev/null +++ b/include/linux/platform_data/ipmmu-vmsa.h @@ -0,0 +1,24 @@ +/* + * IPMMU VMSA Platform Data + * + * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#ifndef __IPMMU_VMSA_H__ +#define __IPMMU_VMSA_H__ + +struct ipmmu_vmsa_master { + const char *name; + unsigned int utlb; +}; + +struct ipmmu_vmsa_platform_data { + const struct ipmmu_vmsa_master *masters; + unsigned int num_masters; +}; + +#endif /* __IPMMU_VMSA_H__ */ -- cgit v1.2.3 From 9b1ee0b2cb8bffdbb3003b1d5205f3ae0592c15a Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Fri, 25 Apr 2014 22:45:30 +0900 Subject: ALSA: firewire/bebob: Add a workaround for M-Audio special Firewire series In a previous commit, a quirk of this firmware regarding transactions was reported. This commit applies a workaround for that quirk. These devices often fail transactions due to a gap_count mismatch; this state is cleared by generating a bus reset. fw_schedule_bus_reset() is an exported symbol in firewire-core, but it has no public header, so this commit moves its prototype from drivers/firewire/core.h to include/linux/firewire.h. The mismatch still affects bus management before the bus reset is generated, so it still takes some time to call the driver's probe() because transactions still fail frequently. Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- drivers/firewire/core.h | 1 - include/linux/firewire.h | 3 +++ sound/firewire/bebob/bebob.c | 32 ++++++++++++++++++++++++++++---- sound/firewire/bebob/bebob.h | 1 + 4 files changed, 32 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index c98764aeeec6..870044e82316 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -118,7 +118,6 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid); void fw_core_remove_card(struct fw_card *card); int fw_compute_block_crc(__be32 *block); -void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); /* -cdev */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h index c3683bdf28fe..d4b7683c722d 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -367,6 +367,9 @@ static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) return tag << 14 | channel << 8 | sy; } +void fw_schedule_bus_reset(struct fw_card *card, bool delayed, + bool short_reset); + struct fw_descriptor { struct list_head link; size_t length; diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index e1dd4219ea6c..31b96b7264cf 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -247,10 +247,26 @@ bebob_probe(struct fw_unit *unit, if (err < 0) goto error; - err = snd_card_register(card); - if (err < 0) { - snd_bebob_stream_destroy_duplex(bebob); - goto error; + if (!bebob->maudio_special_quirk) { + err = snd_card_register(card); + if (err < 0) { + snd_bebob_stream_destroy_duplex(bebob); + goto error; + } + } else { + /* + * This is a workaround. This bus reset seems to have an effect + * to make devices correctly handling transactions. Without + * this, the devices have gap_count mismatch. This causes much + * failure of transaction. + * + * Just after registration, user-land application receive + * signals from dbus and starts I/Os. To avoid I/Os till the + * future bus reset, registration is done in next update().
+ */ + bebob->deferred_registration = true; + fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card, + false, true); } dev_set_drvdata(&unit->device, bebob); @@ -273,6 +289,14 @@ bebob_update(struct fw_unit *unit) fcp_bus_reset(bebob->unit); snd_bebob_stream_update_duplex(bebob); + + if (bebob->deferred_registration) { + if (snd_card_register(bebob->card) < 0) { + snd_bebob_stream_destroy_duplex(bebob); + snd_card_free(bebob->card); + } + bebob->deferred_registration = false; + } } static void bebob_remove(struct fw_unit *unit) diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h index 4a54e746c5c6..91b26b0c649a 100644 --- a/sound/firewire/bebob/bebob.h +++ b/sound/firewire/bebob/bebob.h @@ -109,6 +109,7 @@ struct snd_bebob { /* for M-Audio special devices */ void *maudio_special_quirk; + bool deferred_registration; }; static inline int -- cgit v1.2.3 From 45fef5b88d1f2f47ecdefae6354372d440ca5c84 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Thu, 22 May 2014 12:47:47 +0200 Subject: ACPI: add dynamic_debug support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 1a699476e258 ("ACPI / hotplug / PCI: Hotplug notifications from acpi_bus_notify()") added debug messages for a few common events. These debug messages are unconditionally enabled if CONFIG_DYNAMIC_DEBUG is defined, contrary to the documented meaning, making the ACPI system spew lots of unwanted noise on any kernel with dynamic debugging. The bug was introduced by commit fbfddae69657 ("ACPI: Add acpi_handle_() interfaces"), which added the CONFIG_DYNAMIC_DEBUG dependency without respecting its meaning. Fix by adding real support for dynamic_debug. Fixes: fbfddae69657 ("ACPI: Add acpi_handle_() interfaces") Signed-off-by: Bjørn Mork Signed-off-by: Rafael J. Wysocki --- drivers/acpi/utils.c | 64 ++++++++++++++++++++++++++++++++++++++++++---------- include/linux/acpi.h | 22 ++++++++++++++++-- 2 files changed, 72 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index bba526148583..07c8c5a5ee95 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "internal.h" @@ -456,6 +457,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, } EXPORT_SYMBOL(acpi_evaluate_ost); +/** + * acpi_handle_path: Return the object path of handle + * + * Caller must free the returned buffer + */ +static char *acpi_handle_path(acpi_handle handle) +{ + struct acpi_buffer buffer = { + .length = ACPI_ALLOCATE_BUFFER, + .pointer = NULL + }; + + if (in_interrupt() || + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) + return NULL; + return buffer.pointer; +} + /** * acpi_handle_printk: Print message with ACPI prefix and object path * @@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...) { struct va_format vaf; va_list args; - struct acpi_buffer buffer = { - .length = ACPI_ALLOCATE_BUFFER, - .pointer = NULL - }; const char *path; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - if (in_interrupt() || - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) - path = ""; - else - path = buffer.pointer; - - printk("%sACPI: %s: %pV", level, path, &vaf); + path = acpi_handle_path(handle); + printk("%sACPI: %s: %pV", level, path ? 
path : "" , &vaf); va_end(args); - kfree(buffer.pointer); + kfree(path); } EXPORT_SYMBOL(acpi_handle_printk); +#if defined(CONFIG_DYNAMIC_DEBUG) +/** + * __acpi_handle_debug: pr_debug with ACPI prefix and object path + * + * This function is called through acpi_handle_debug macro and debug + * prints a message with ACPI prefix and object path. This function + * acquires the global namespace mutex to obtain an object path. In + * interrupt context, it shows the object path as . + */ +void +__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + const char *path; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + path = acpi_handle_path(handle); + __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "", &vaf); + + va_end(args); + kfree(path); +} +EXPORT_SYMBOL(__acpi_handle_debug); +#endif + /** * acpi_has_method: Check whether @handle has a method named @name * @handle: ACPI device handle diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7a8f2cd66c8b..0e2569031a6f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -37,6 +37,7 @@ #include #include +#include #include #include @@ -589,6 +590,14 @@ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} #endif /* !CONFIG_ACPI */ +#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) +__printf(3, 4) +void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); +#else +#define __acpi_handle_debug(descriptor, handle, fmt, ...) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); +#endif + /* * acpi_handle_: Print message with ACPI prefix and object path * @@ -610,11 +619,19 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} #define acpi_handle_info(handle, fmt, ...) \ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) -/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */ -#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +#if defined(DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) #else +#if defined(CONFIG_DYNAMIC_DEBUG) +#define acpi_handle_debug(handle, fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) +#else #define acpi_handle_debug(handle, fmt, ...) \ ({ \ if (0) \ @@ -622,5 +639,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} 0; \ }) #endif +#endif #endif /*_LINUX_ACPI_H*/ -- cgit v1.2.3 From 6c46ccc8bb0660c1805f6662d4646eb5405dcb2d Mon Sep 17 00:00:00 2001 From: Alban Bedel Date: Tue, 20 May 2014 12:14:03 +0200 Subject: regulator: tps6586x: Add support for the TPS658640 The TPS658640 has a different set of output voltage for most LDO and the RTC LDO isn't settable. This chip also report 2 different version ID, as the datasheet doesn't list the possible values the second ID has simply been named TPS658640v2. 
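The SM2 table added here for the TPS658640 is a plain 50 mV ladder, so its 32 entries can be summarised by the closed form below. This is an illustrative sketch only, not part of the patch (the driver keeps the explicit table); the RTC LDO, by contrast, needs a four-entry table because its 2.5/2.85/3.1/3.3 V steps are not evenly spaced.

/* Illustrative only: closed form equivalent to the 32-entry
 * tps658640_sm2_voltages[] table added below (5-bit selector,
 * 2.15 V base, 50 mV per step, topping out at 3.7 V). */
static int tps658640_sm2_sel_to_uV(unsigned int sel)
{
	return 2150000 + sel * 50000;	/* sel = 0..31 */
}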
Signed-off-by: Alban Bedel Signed-off-by: Mark Brown --- drivers/mfd/tps6586x.c | 4 ++++ drivers/regulator/tps6586x-regulator.c | 36 ++++++++++++++++++++++++++++++++++ include/linux/mfd/tps6586x.h | 2 ++ 3 files changed, 42 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index bbd54414a75d..835e5549ecdd 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -495,6 +495,10 @@ static void tps6586x_print_version(struct i2c_client *client, int version) case TPS658623: name = "TPS658623"; break; + case TPS658640: + case TPS658640v2: + name = "TPS658640"; + break; case TPS658643: name = "TPS658643"; break; diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index da8ee0217573..e045b7fe5572 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -116,6 +116,13 @@ static const unsigned int tps6586x_sm2_voltages[] = { 4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000, }; +static int tps658640_sm2_voltages[] = { + 2150000, 2200000, 2250000, 2300000, 2350000, 2400000, 2450000, 2500000, + 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, + 2950000, 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, + 3350000, 3400000, 3450000, 3500000, 3550000, 3600000, 3650000, 3700000, +}; + static const unsigned int tps658643_sm2_voltages[] = { 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, @@ -130,6 +137,10 @@ static const unsigned int tps6586x_dvm_voltages[] = { 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, }; +static int tps658640_rtc_voltages[] = { + 2500000, 2850000, 3100000, 3300000, +}; + #define TPS6586X_REGULATOR(_id, _ops, _pin_name, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ .desc = { \ @@ -224,6 +235,26 @@ static struct tps6586x_regulator tps658623_regulator[] = { END, 7), }; +static struct tps6586x_regulator tps658640_regulator[] = { + TPS6586X_LDO(LDO_3, "vinldo23", tps6586x_ldo0, SUPPLYV4, 0, 3, + ENC, 2, END, 2), + TPS6586X_LDO(LDO_5, "REG-SYS", tps6586x_ldo0, SUPPLYV6, 0, 3, + ENE, 6, ENE, 6), + TPS6586X_LDO(LDO_6, "vinldo678", tps6586x_ldo0, SUPPLYV3, 0, 3, + ENC, 4, END, 4), + TPS6586X_LDO(LDO_7, "vinldo678", tps6586x_ldo0, SUPPLYV3, 3, 3, + ENC, 5, END, 5), + TPS6586X_LDO(LDO_8, "vinldo678", tps6586x_ldo0, SUPPLYV2, 5, 3, + ENC, 6, END, 6), + TPS6586X_LDO(LDO_9, "vinldo9", tps6586x_ldo0, SUPPLYV6, 3, 3, + ENE, 7, ENE, 7), + TPS6586X_LDO(SM_2, "vin-sm2", tps658640_sm2, SUPPLYV2, 0, 5, + ENC, 7, END, 7), + + TPS6586X_FIXED_LDO(LDO_RTC, "REG-SYS", tps658640_rtc, SUPPLYV4, 3, 2, + V4, 7, V4, 7), +}; + static struct tps6586x_regulator tps658643_regulator[] = { TPS6586X_LDO(SM_2, "vin-sm2", tps658643_sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), @@ -312,6 +343,11 @@ static struct tps6586x_regulator *find_regulator_info(int id, int version) table = tps658623_regulator; num = ARRAY_SIZE(tps658623_regulator); break; + case TPS658640: + case TPS658640v2: + table = tps658640_regulator; + num = ARRAY_SIZE(tps658640_regulator); + break; case TPS658643: table = tps658643_regulator; num = ARRAY_SIZE(tps658643_regulator); diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h index cbecec2e353a..96187ed9f9bb 100644 --- a/include/linux/mfd/tps6586x.h +++ b/include/linux/mfd/tps6586x.h @@ -17,6 +17,8 @@ #define TPS658621A 0x15 #define TPS658621CD 
0x2c #define TPS658623 0x1b +#define TPS658640 0x01 +#define TPS658640v2 0x02 #define TPS658643 0x03 enum { -- cgit v1.2.3 From 3cc6919bd61315ea60baf95f3f9868aacfd1ace4 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 21 May 2014 15:39:54 +0200 Subject: backlight: Add backlight device (un)registration notification Some firmware drivers, ie acpi-video want to get themselves out of the way (in some cases) when their also is a raw backlight device available. Due to module loading ordering being unknown, acpi-video cannot be certain that the backlight_device_registered(BACKLIGHT_RAW) it does for this is the final verdict wrt there being a BACKLIGHT_RAW device. By adding notification acpi-video can listen for backlight devices showing up after it has loaded, and unregister its backlight device if desired. Signed-off-by: Hans de Goede Acked-by: Jingoo Han Signed-off-by: Rafael J. Wysocki --- drivers/video/backlight/backlight.c | 40 +++++++++++++++++++++++++++++++++++++ include/linux/backlight.h | 7 +++++++ 2 files changed, 47 insertions(+) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index bd2172c2d650..428089009cd5 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -23,6 +23,7 @@ static struct list_head backlight_dev_list; static struct mutex backlight_dev_list_mutex; +static struct blocking_notifier_head backlight_notifier; static const char *const backlight_types[] = { [BACKLIGHT_RAW] = "raw", @@ -370,6 +371,9 @@ struct backlight_device *backlight_device_register(const char *name, list_add(&new_bd->entry, &backlight_dev_list); mutex_unlock(&backlight_dev_list_mutex); + blocking_notifier_call_chain(&backlight_notifier, + BACKLIGHT_REGISTERED, new_bd); + return new_bd; } EXPORT_SYMBOL(backlight_device_register); @@ -413,6 +417,10 @@ void backlight_device_unregister(struct backlight_device *bd) pmac_backlight = NULL; mutex_unlock(&pmac_backlight_mutex); #endif + + blocking_notifier_call_chain(&backlight_notifier, + BACKLIGHT_UNREGISTERED, bd); + mutex_lock(&bd->ops_lock); bd->ops = NULL; mutex_unlock(&bd->ops_lock); @@ -437,6 +445,36 @@ static int devm_backlight_device_match(struct device *dev, void *res, return *r == data; } +/** + * backlight_register_notifier - get notified of backlight (un)registration + * @nb: notifier block with the notifier to call on backlight (un)registration + * + * @return 0 on success, otherwise a negative error code + * + * Register a notifier to get notified when backlight devices get registered + * or unregistered. + */ +int backlight_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&backlight_notifier, nb); +} +EXPORT_SYMBOL(backlight_register_notifier); + +/** + * backlight_unregister_notifier - unregister a backlight notifier + * @nb: notifier block to unregister + * + * @return 0 on success, otherwise a negative error code + * + * Register a notifier to get notified when backlight devices get registered + * or unregistered. 
+ */ +int backlight_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&backlight_notifier, nb); +} +EXPORT_SYMBOL(backlight_unregister_notifier); + /** * devm_backlight_device_register - resource managed backlight_device_register() * @dev: the device to register @@ -544,6 +582,8 @@ static int __init backlight_class_init(void) backlight_class->pm = &backlight_class_dev_pm_ops; INIT_LIST_HEAD(&backlight_dev_list); mutex_init(&backlight_dev_list_mutex); + BLOCKING_INIT_NOTIFIER_HEAD(&backlight_notifier); + return 0; } diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 72647429adf6..adb14a8616df 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -40,6 +40,11 @@ enum backlight_type { BACKLIGHT_TYPE_MAX, }; +enum backlight_notification { + BACKLIGHT_REGISTERED, + BACKLIGHT_UNREGISTERED, +}; + struct backlight_device; struct fb_info; @@ -133,6 +138,8 @@ extern void devm_backlight_device_unregister(struct device *dev, extern void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); extern bool backlight_device_registered(enum backlight_type type); +extern int backlight_register_notifier(struct notifier_block *nb); +extern int backlight_unregister_notifier(struct notifier_block *nb); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) -- cgit v1.2.3 From 3ebe7f9f7e4a4fd1f6461ecd01ff2961317a483a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Fri, 2 May 2014 10:40:42 -0600 Subject: PCI: Notify driver before and after device reset Notify a PCI device driver when its device's access is about to be disabled for an impending reset attempt, then after the attempt completes and device access is restored. The notification is via the pci_error_handlers interface. Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas --- drivers/pci/pci.c | 21 +++++++++++++++++++++ include/linux/pci.h | 3 +++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7325d43bf030..43d87b26ec5b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3305,8 +3305,27 @@ static void pci_dev_unlock(struct pci_dev *dev) pci_cfg_access_unlock(dev); } +/** + * pci_reset_notify - notify device driver of reset + * @dev: device to be notified of reset + * @prepare: 'true' if device is about to be reset; 'false' if reset attempt + * completed + * + * Must be called prior to device access being disabled and after device + * access is restored. + */ +static void pci_reset_notify(struct pci_dev *dev, bool prepare) +{ + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; + if (err_handler && err_handler->reset_notify) + err_handler->reset_notify(dev, prepare); +} + static void pci_dev_save_and_disable(struct pci_dev *dev) { + pci_reset_notify(dev, true); + /* * Wake-up device prior to save. 
PM registers default to D0 after * reset and a simple register restore doesn't reliably return @@ -3328,6 +3347,7 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { pci_restore_state(dev); + pci_reset_notify(dev, false); } static int pci_dev_reset(struct pci_dev *dev, int probe) @@ -3344,6 +3364,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) return rc; } + /** * __pci_reset_function - reset a PCI device function * @dev: PCI device to reset diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..31c43093e538 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -603,6 +603,9 @@ struct pci_error_handlers { /* PCI slot has been reset */ pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + /* PCI function reset prepare or completed */ + void (*reset_notify)(struct pci_dev *dev, bool prepare); + /* Device driver may resume normal operations */ void (*resume)(struct pci_dev *dev); }; -- cgit v1.2.3 From f14bbe77a96bb979dc539d8308ee18a9363a544f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 27 May 2014 12:06:53 -0600 Subject: blk-mq: pass in suggested NUMA node to ->alloc_hctx() Drivers currently have to figure this out on their own, and they are missing information to do it properly. The ones that did attempt to do it, do it wrong. So just pass in the suggested node directly to the alloc function. Signed-off-by: Jens Axboe --- block/blk-mq-cpumap.c | 16 ++++++++++++++++ block/blk-mq.c | 26 +++++++++++++++----------- block/blk-mq.h | 1 + drivers/block/null_blk.c | 35 +++-------------------------------- include/linux/blk-mq.h | 4 ++-- 5 files changed, 37 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 5d0f93cf358c..0daacb927be1 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) kfree(map); return NULL; } + +/* + * We have no quick way of doing reverse lookups. This is only used at + * queue init time, so runtime isn't important. 
+ */ +int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) +{ + int i; + + for_each_possible_cpu(i) { + if (index == mq_map[i]) + return cpu_to_node(i); + } + + return NUMA_NO_NODE; +} diff --git a/block/blk-mq.c b/block/blk-mq.c index e8b5f74dc1a1..30bad930e661 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1297,10 +1297,10 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) EXPORT_SYMBOL(blk_mq_map_queue); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, - unsigned int hctx_index) + unsigned int hctx_index, + int node) { - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, - set->numa_node); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); @@ -1752,6 +1752,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) struct blk_mq_hw_ctx **hctxs; struct blk_mq_ctx *ctx; struct request_queue *q; + unsigned int *map; int i; ctx = alloc_percpu(struct blk_mq_ctx); @@ -1764,8 +1765,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (!hctxs) goto err_percpu; + map = blk_mq_make_queue_map(set); + if (!map) + goto err_map; + for (i = 0; i < set->nr_hw_queues; i++) { - hctxs[i] = set->ops->alloc_hctx(set, i); + int node = blk_mq_hw_queue_to_node(map, i); + + hctxs[i] = set->ops->alloc_hctx(set, i, node); if (!hctxs[i]) goto err_hctxs; @@ -1773,7 +1780,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) goto err_hctxs; atomic_set(&hctxs[i]->nr_active, 0); - hctxs[i]->numa_node = NUMA_NO_NODE; + hctxs[i]->numa_node = node; hctxs[i]->queue_num = i; } @@ -1784,15 +1791,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (percpu_counter_init(&q->mq_usage_counter, 0)) goto err_map; - q->mq_map = blk_mq_make_queue_map(set); - if (!q->mq_map) - goto err_map; - setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); blk_queue_rq_timeout(q, 30000); q->nr_queues = nr_cpu_ids; q->nr_hw_queues = set->nr_hw_queues; + q->mq_map = map; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; @@ -1844,16 +1848,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) err_flush_rq: kfree(q->flush_rq); err_hw: - kfree(q->mq_map); -err_map: blk_cleanup_queue(q); err_hctxs: + kfree(map); for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); set->ops->free_hctx(hctxs[i], i); } +err_map: kfree(hctxs); err_percpu: free_percpu(ctx); diff --git a/block/blk-mq.h b/block/blk-mq.h index 491dbd4e93f5..ff5e6bf0f691 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -52,6 +52,7 @@ void blk_mq_disable_hotplug(void); */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* * Basic implementation of sparser bitmap, allowing the user to spread diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8e7e3a0b0d24..4d33c8c25fbf 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -322,39 +322,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) } static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, - unsigned int hctx_index) + unsigned int hctx_index, + int node) { - int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes); - int tip = (set->nr_hw_queues % nr_online_nodes); - int node = 
0, i, n; - - /* - * Split submit queues evenly wrt to the number of nodes. If uneven, - * fill the first buckets with one extra, until the rest is filled with - * no extra. - */ - for (i = 0, n = 1; i < hctx_index; i++, n++) { - if (n % b_size == 0) { - n = 0; - node++; - - tip--; - if (!tip) - b_size = set->nr_hw_queues / nr_online_nodes; - } - } - - /* - * A node might not be online, therefore map the relative node id to the - * real node id. - */ - for_each_online_node(n) { - if (!node) - break; - node--; - } - - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); } static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f76bb18350af..afeb93496907 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -80,7 +80,7 @@ struct blk_mq_tag_set { typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, - unsigned int); + unsigned int, int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); @@ -165,7 +165,7 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, g struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); -- cgit v1.2.3 From 1c86438c9423a26cc9f7f74a8950d9cf9c93bc23 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Sun, 4 May 2014 12:23:37 +0800 Subject: PCI: Add new pci_is_bridge() interface Add a helper function to check a device's header type for PCI bridge or CardBus bridge. Requires: 326c1cdae741 PCI: Rename pci_is_bridge() to pci_has_subordinate() Signed-off-by: Yijing Wang Signed-off-by: Bjorn Helgaas --- include/linux/pci.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..f2a5946ea0bf 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -477,6 +477,19 @@ static inline bool pci_is_root_bus(struct pci_bus *pbus) return !(pbus->parent); } +/** + * pci_is_bridge - check if the PCI device is a bridge + * @dev: PCI device + * + * Return true if the PCI device is bridge whether it has subordinate + * or not. + */ +static inline bool pci_is_bridge(struct pci_dev *dev) +{ + return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; +} + static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) { dev = pci_physfn(dev); -- cgit v1.2.3 From 1bb6c08abfb653ce6e65d8ab4ddef403227afedf Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Apr 2014 12:54:47 +0200 Subject: driver core: Move driver_data back to struct device Having to allocate memory as part of dev_set_drvdata() is a problem because that memory may never get freed if the device itself is not created. 
So move driver_data back to struct device. This is a partial revert of commit b4028437. Signed-off-by: Jean Delvare Signed-off-by: Greg Kroah-Hartman --- drivers/base/base.h | 3 --- drivers/base/dd.c | 13 +++---------- include/linux/device.h | 3 +++ 3 files changed, 6 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/base.h b/drivers/base/base.h index 24f424249d9b..251c5d30f963 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -63,8 +63,6 @@ struct driver_private { * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. - * @driver_data - private pointer for driver specific info. Will turn into a - * list soon. * @device - pointer back to the struct class that this structure is * associated with. * @@ -76,7 +74,6 @@ struct device_private { struct klist_node knode_driver; struct klist_node knode_bus; struct list_head deferred_probe; - void *driver_data; struct device *device; }; #define to_device_private_parent(obj) \ diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 62ec61e8f84a..d14b6e895896 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -594,22 +594,15 @@ void driver_detach(struct device_driver *drv) */ void *dev_get_drvdata(const struct device *dev) { - if (dev && dev->p) - return dev->p->driver_data; + if (dev) + return dev->driver_data; return NULL; } EXPORT_SYMBOL(dev_get_drvdata); int dev_set_drvdata(struct device *dev, void *data) { - int error; - - if (!dev->p) { - error = device_private_init(dev); - if (error) - return error; - } - dev->p->driver_data = data; + dev->driver_data = data; return 0; } EXPORT_SYMBOL(dev_set_drvdata); diff --git a/include/linux/device.h b/include/linux/device.h index d1d1c055b48e..5c94ac3e7972 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -673,6 +673,7 @@ struct acpi_dev_node { * variants, which GPIO pins act in what additional roles, and so * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. + * @driver_data: Private pointer for driver specific info. * @power: For device power management. * See Documentation/power/devices.txt for details. * @pm_domain: Provide callbacks that are executed during system suspend, @@ -734,6 +735,8 @@ struct device { device */ void *platform_data; /* Platform specific data, device core doesn't touch it */ + void *driver_data; /* Driver data, set and get with + dev_set/get_drvdata */ struct dev_pm_info power; struct dev_pm_domain *pm_domain; -- cgit v1.2.3 From 4101866c743a3695666e8562b5713b4d7f341cbf Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 27 May 2014 10:53:17 +0200 Subject: ASoC: Add ADAU1X61 and ADAU1X81 CODECs common code The ADAU1X61 and ADAU1X81 are very similar in the digital domain, but are quite different in the analog domain. This patch adds support for the common parts of the ADAU1X61 and ADAU1X81 CODECs. The patch also restores some of the alphabetical order in the Makfile and Kconfig. 
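As a rough sketch of how this common code is meant to be consumed (the I2C glue and regmap layout below are illustrative assumptions, not part of this patch; the real ADAU1361/1761/1781 front ends follow in later patches), a chip-specific driver builds a regmap using the adau17x1_readable_register()/adau17x1_volatile_register() helpers and hands it to adau17x1_probe():

static const struct regmap_config adau1x61_regmap_config = {
	.reg_bits	= 16,	/* assumption: 16-bit register addresses, 8-bit values */
	.val_bits	= 8,
	.cache_type	= REGCACHE_RBTREE,
	.readable_reg	= adau17x1_readable_register,
	.volatile_reg	= adau17x1_volatile_register,
};

static int adau1x61_i2c_probe(struct i2c_client *client,
			      const struct i2c_device_id *id)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(client, &adau1x61_regmap_config);

	/* adau17x1_probe() checks IS_ERR(regmap), allocates the shared
	 * struct adau and stores it with dev_set_drvdata(). */
	return adau17x1_probe(&client->dev, regmap, ADAU1761, NULL);
}

A real front end would then register its snd_soc_codec driver, whose codec probe callback layers the analog-domain controls on top of what adau17x1_add_widgets() and adau17x1_add_routes() provide.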
Signed-off-by: Lars-Peter Clausen Signed-off-by: Mark Brown --- include/linux/platform_data/adau17x1.h | 23 + sound/soc/codecs/Kconfig | 6 +- sound/soc/codecs/Makefile | 6 +- sound/soc/codecs/adau17x1.c | 866 +++++++++++++++++++++++++++++++++ sound/soc/codecs/adau17x1.h | 124 +++++ 5 files changed, 1022 insertions(+), 3 deletions(-) create mode 100644 include/linux/platform_data/adau17x1.h create mode 100644 sound/soc/codecs/adau17x1.c create mode 100644 sound/soc/codecs/adau17x1.h (limited to 'include/linux') diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h new file mode 100644 index 000000000000..f90bd9286f31 --- /dev/null +++ b/include/linux/platform_data/adau17x1.h @@ -0,0 +1,23 @@ +/* + * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961/ADAU1781/ADAU1781 codecs + * + * Copyright 2011-2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__ +#define __LINUX_PLATFORM_DATA_ADAU17X1_H__ + +/** + * enum adau17x1_micbias_voltage - Microphone bias voltage + * @ADAU17X1_MICBIAS_0_90_AVDD: 0.9 * AVDD + * @ADAU17X1_MICBIAS_0_65_AVDD: 0.65 * AVDD + */ +enum adau17x1_micbias_voltage { + ADAU17X1_MICBIAS_0_90_AVDD = 0, + ADAU17X1_MICBIAS_0_65_AVDD = 1, +}; + +#endif diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index f0e840137887..182a39751a91 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -210,13 +210,17 @@ config SND_SOC_AD1980 config SND_SOC_AD73311 tristate +config SND_SOC_ADAU1373 + tristate + config SND_SOC_ADAU1701 tristate "Analog Devices ADAU1701 CODEC" depends on I2C select SND_SOC_SIGMADSP -config SND_SOC_ADAU1373 +config SND_SOC_ADAU17X1 tristate + select SND_SOC_SIGMADSP config SND_SOC_ADAU1977 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 3c4d275d064b..a8cba3086830 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -7,8 +7,9 @@ snd-soc-ad193x-spi-objs := ad193x-spi.o snd-soc-ad193x-i2c-objs := ad193x-i2c.o snd-soc-ad1980-objs := ad1980.o snd-soc-ad73311-objs := ad73311.o -snd-soc-adau1701-objs := adau1701.o snd-soc-adau1373-objs := adau1373.o +snd-soc-adau1701-objs := adau1701.o +snd-soc-adau17x1-objs := adau17x1.o snd-soc-adau1977-objs := adau1977.o snd-soc-adau1977-spi-objs := adau1977-spi.o snd-soc-adau1977-i2c-objs := adau1977-i2c.o @@ -157,10 +158,11 @@ obj-$(CONFIG_SND_SOC_AD193X_I2C) += snd-soc-ad193x-i2c.o obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o obj-$(CONFIG_SND_SOC_ADAU1373) += snd-soc-adau1373.o +obj-$(CONFIG_SND_SOC_ADAU1701) += snd-soc-adau1701.o +obj-$(CONFIG_SND_SOC_ADAU17X1) += snd-soc-adau17x1.o obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o -obj-$(CONFIG_SND_SOC_ADAU1701) += snd-soc-adau1701.o obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c new file mode 100644 index 000000000000..2961fae9670a --- /dev/null +++ b/sound/soc/codecs/adau17x1.c @@ -0,0 +1,866 @@ +/* + * Common code for ADAU1X61 and ADAU1X81 codecs + * + * Copyright 2011-2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sigmadsp.h" +#include "adau17x1.h" + +static const char * const adau17x1_capture_mixer_boost_text[] = { + "Normal operation", "Boost Level 1", "Boost Level 2", "Boost Level 3", +}; + +static SOC_ENUM_SINGLE_DECL(adau17x1_capture_boost_enum, + ADAU17X1_REC_POWER_MGMT, 5, adau17x1_capture_mixer_boost_text); + +static const char * const adau17x1_mic_bias_mode_text[] = { + "Normal operation", "High performance", +}; + +static SOC_ENUM_SINGLE_DECL(adau17x1_mic_bias_mode_enum, + ADAU17X1_MICBIAS, 3, adau17x1_mic_bias_mode_text); + +static const DECLARE_TLV_DB_MINMAX(adau17x1_digital_tlv, -9563, 0); + +static const struct snd_kcontrol_new adau17x1_controls[] = { + SOC_DOUBLE_R_TLV("Digital Capture Volume", + ADAU17X1_LEFT_INPUT_DIGITAL_VOL, + ADAU17X1_RIGHT_INPUT_DIGITAL_VOL, + 0, 0xff, 1, adau17x1_digital_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume", ADAU17X1_DAC_CONTROL1, + ADAU17X1_DAC_CONTROL2, 0, 0xff, 1, adau17x1_digital_tlv), + + SOC_SINGLE("ADC High Pass Filter Switch", ADAU17X1_ADC_CONTROL, + 5, 1, 0), + SOC_SINGLE("Playback De-emphasis Switch", ADAU17X1_DAC_CONTROL0, + 2, 1, 0), + + SOC_ENUM("Capture Boost", adau17x1_capture_boost_enum), + + SOC_ENUM("Mic Bias Mode", adau17x1_mic_bias_mode_enum), +}; + +static int adau17x1_pll_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct adau *adau = snd_soc_codec_get_drvdata(w->codec); + int ret; + + if (SND_SOC_DAPM_EVENT_ON(event)) { + adau->pll_regs[5] = 1; + } else { + adau->pll_regs[5] = 0; + /* Bypass the PLL when disabled, otherwise registers will become + * inaccessible. */ + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_CORECLK_SRC_PLL, 0); + } + + /* The PLL register is 6 bytes long and can only be written at once. 
*/ + ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL, + adau->pll_regs, ARRAY_SIZE(adau->pll_regs)); + + if (SND_SOC_DAPM_EVENT_ON(event)) { + mdelay(5); + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_CORECLK_SRC_PLL, + ADAU17X1_CLOCK_CONTROL_CORECLK_SRC_PLL); + } + + return 0; +} + +static const char * const adau17x1_mono_stereo_text[] = { + "Stereo", + "Mono Left Channel (L+R)", + "Mono Right Channel (L+R)", + "Mono (L+R)", +}; + +static SOC_ENUM_SINGLE_DECL(adau17x1_dac_mode_enum, + ADAU17X1_DAC_CONTROL0, 6, adau17x1_mono_stereo_text); + +static const struct snd_kcontrol_new adau17x1_dac_mode_mux = + SOC_DAPM_ENUM("DAC Mono-Stereo-Mode", adau17x1_dac_mode_enum); + +static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = { + SND_SOC_DAPM_SUPPLY_S("PLL", 3, SND_SOC_NOPM, 0, 0, adau17x1_pll_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY("AIFCLK", SND_SOC_NOPM, 0, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("MICBIAS", ADAU17X1_MICBIAS, 0, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("Left Playback Enable", ADAU17X1_PLAY_POWER_MGMT, + 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Right Playback Enable", ADAU17X1_PLAY_POWER_MGMT, + 1, 0, NULL, 0), + + SND_SOC_DAPM_MUX("Left DAC Mode Mux", SND_SOC_NOPM, 0, 0, + &adau17x1_dac_mode_mux), + SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0, + &adau17x1_dac_mode_mux), + + SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0), + SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0), + SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0), + SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0), +}; + +static const struct snd_soc_dapm_route adau17x1_dapm_routes[] = { + { "Left Decimator", NULL, "SYSCLK" }, + { "Right Decimator", NULL, "SYSCLK" }, + { "Left DAC", NULL, "SYSCLK" }, + { "Right DAC", NULL, "SYSCLK" }, + { "Capture", NULL, "SYSCLK" }, + { "Playback", NULL, "SYSCLK" }, + + { "Left DAC", NULL, "Left DAC Mode Mux" }, + { "Right DAC", NULL, "Right DAC Mode Mux" }, + + { "Capture", NULL, "AIFCLK" }, + { "Playback", NULL, "AIFCLK" }, +}; + +static const struct snd_soc_dapm_route adau17x1_dapm_pll_route = { + "SYSCLK", NULL, "PLL", +}; + +/* + * The MUX register for the Capture and Playback MUXs selects either DSP as + * source/destination or one of the TDM slots. The TDM slot is selected via + * snd_soc_dai_set_tdm_slot(), so we only expose whether to go to the DSP or + * directly to the DAI interface with this control. 
+ */ +static int adau17x1_dsp_mux_enum_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol); + struct adau *adau = snd_soc_codec_get_drvdata(codec); + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + struct snd_soc_dapm_update update; + unsigned int stream = e->shift_l; + unsigned int val, change; + int reg; + + if (ucontrol->value.enumerated.item[0] >= e->items) + return -EINVAL; + + switch (ucontrol->value.enumerated.item[0]) { + case 0: + val = 0; + adau->dsp_bypass[stream] = false; + break; + default: + val = (adau->tdm_slot[stream] * 2) + 1; + adau->dsp_bypass[stream] = true; + break; + } + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + reg = ADAU17X1_SERIAL_INPUT_ROUTE; + else + reg = ADAU17X1_SERIAL_OUTPUT_ROUTE; + + change = snd_soc_test_bits(codec, reg, 0xff, val); + if (change) { + update.kcontrol = kcontrol; + update.mask = 0xff; + update.reg = reg; + update.val = val; + + snd_soc_dapm_mux_update_power(&codec->dapm, kcontrol, + ucontrol->value.enumerated.item[0], e, &update); + } + + return change; +} + +static int adau17x1_dsp_mux_enum_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol); + struct adau *adau = snd_soc_codec_get_drvdata(codec); + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + unsigned int stream = e->shift_l; + unsigned int reg, val; + int ret; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + reg = ADAU17X1_SERIAL_INPUT_ROUTE; + else + reg = ADAU17X1_SERIAL_OUTPUT_ROUTE; + + ret = regmap_read(adau->regmap, reg, &val); + if (ret) + return ret; + + if (val != 0) + val = 1; + ucontrol->value.enumerated.item[0] = val; + + return 0; +} + +#define DECLARE_ADAU17X1_DSP_MUX_CTRL(_name, _label, _stream, _text) \ + const struct snd_kcontrol_new _name = \ + SOC_DAPM_ENUM_EXT(_label, (const struct soc_enum)\ + SOC_ENUM_SINGLE(SND_SOC_NOPM, _stream, \ + ARRAY_SIZE(_text), _text), \ + adau17x1_dsp_mux_enum_get, adau17x1_dsp_mux_enum_put) + +static const char * const adau17x1_dac_mux_text[] = { + "DSP", + "AIFIN", +}; + +static const char * const adau17x1_capture_mux_text[] = { + "DSP", + "Decimator", +}; + +static DECLARE_ADAU17X1_DSP_MUX_CTRL(adau17x1_dac_mux, "DAC Playback Mux", + SNDRV_PCM_STREAM_PLAYBACK, adau17x1_dac_mux_text); + +static DECLARE_ADAU17X1_DSP_MUX_CTRL(adau17x1_capture_mux, "Capture Mux", + SNDRV_PCM_STREAM_CAPTURE, adau17x1_capture_mux_text); + +static const struct snd_soc_dapm_widget adau17x1_dsp_dapm_widgets[] = { + SND_SOC_DAPM_PGA("DSP", ADAU17X1_DSP_RUN, 0, 0, NULL, 0), + SND_SOC_DAPM_SIGGEN("DSP Siggen"), + + SND_SOC_DAPM_MUX("DAC Playback Mux", SND_SOC_NOPM, 0, 0, + &adau17x1_dac_mux), + SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, + &adau17x1_capture_mux), +}; + +static const struct snd_soc_dapm_route adau17x1_dsp_dapm_routes[] = { + { "DAC Playback Mux", "DSP", "DSP" }, + { "DAC Playback Mux", "AIFIN", "Playback" }, + + { "Left DAC Mode Mux", "Stereo", "DAC Playback Mux" }, + { "Left DAC Mode Mux", "Mono (L+R)", "DAC Playback Mux" }, + { "Left DAC Mode Mux", "Mono Left Channel (L+R)", "DAC Playback Mux" }, + { "Right DAC Mode Mux", "Stereo", "DAC Playback Mux" }, + { "Right DAC Mode Mux", "Mono (L+R)", "DAC Playback Mux" }, + { "Right DAC Mode Mux", "Mono Right Channel (L+R)", "DAC Playback Mux" }, + + { "Capture Mux", "DSP", "DSP" }, + { "Capture Mux", "Decimator", "Left Decimator" }, + { "Capture Mux", "Decimator", 
"Right Decimator" }, + + { "Capture", NULL, "Capture Mux" }, + + { "DSP", NULL, "DSP Siggen" }, + + { "DSP", NULL, "Left Decimator" }, + { "DSP", NULL, "Right Decimator" }, +}; + +static const struct snd_soc_dapm_route adau17x1_no_dsp_dapm_routes[] = { + { "Left DAC Mode Mux", "Stereo", "Playback" }, + { "Left DAC Mode Mux", "Mono (L+R)", "Playback" }, + { "Left DAC Mode Mux", "Mono Left Channel (L+R)", "Playback" }, + { "Right DAC Mode Mux", "Stereo", "Playback" }, + { "Right DAC Mode Mux", "Mono (L+R)", "Playback" }, + { "Right DAC Mode Mux", "Mono Right Channel (L+R)", "Playback" }, + { "Capture", NULL, "Left Decimator" }, + { "Capture", NULL, "Right Decimator" }, +}; + +bool adau17x1_has_dsp(struct adau *adau) +{ + switch (adau->type) { + case ADAU1761: + case ADAU1381: + case ADAU1781: + return true; + default: + return false; + } +} +EXPORT_SYMBOL_GPL(adau17x1_has_dsp); + +static int adau17x1_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct adau *adau = snd_soc_codec_get_drvdata(codec); + unsigned int val, div, dsp_div; + unsigned int freq; + + if (adau->clk_src == ADAU17X1_CLK_SRC_PLL) + freq = adau->pll_freq; + else + freq = adau->sysclk; + + if (freq % params_rate(params) != 0) + return -EINVAL; + + switch (freq / params_rate(params)) { + case 1024: /* fs */ + div = 0; + dsp_div = 1; + break; + case 6144: /* fs / 6 */ + div = 1; + dsp_div = 6; + break; + case 4096: /* fs / 4 */ + div = 2; + dsp_div = 5; + break; + case 3072: /* fs / 3 */ + div = 3; + dsp_div = 4; + break; + case 2048: /* fs / 2 */ + div = 4; + dsp_div = 3; + break; + case 1536: /* fs / 1.5 */ + div = 5; + dsp_div = 2; + break; + case 512: /* fs / 0.5 */ + div = 6; + dsp_div = 0; + break; + default: + return -EINVAL; + } + + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, + ADAU17X1_CONVERTER0_CONVSR_MASK, div); + if (adau17x1_has_dsp(adau)) { + regmap_write(adau->regmap, ADAU17X1_SERIAL_SAMPLING_RATE, div); + regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dsp_div); + } + + if (adau->dai_fmt != SND_SOC_DAIFMT_RIGHT_J) + return 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + val = ADAU17X1_SERIAL_PORT1_DELAY16; + break; + case SNDRV_PCM_FORMAT_S24_LE: + val = ADAU17X1_SERIAL_PORT1_DELAY8; + break; + case SNDRV_PCM_FORMAT_S32_LE: + val = ADAU17X1_SERIAL_PORT1_DELAY0; + break; + default: + return -EINVAL; + } + + return regmap_update_bits(adau->regmap, ADAU17X1_SERIAL_PORT1, + ADAU17X1_SERIAL_PORT1_DELAY_MASK, val); +} + +static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id, + int source, unsigned int freq_in, unsigned int freq_out) +{ + struct snd_soc_codec *codec = dai->codec; + struct adau *adau = snd_soc_codec_get_drvdata(codec); + unsigned int r, n, m, i, j; + unsigned int div; + int ret; + + if (freq_in < 8000000 || freq_in > 27000000) + return -EINVAL; + + if (!freq_out) { + r = 0; + n = 0; + m = 0; + div = 0; + } else { + if (freq_out % freq_in != 0) { + div = DIV_ROUND_UP(freq_in, 13500000); + freq_in /= div; + r = freq_out / freq_in; + i = freq_out % freq_in; + j = gcd(i, freq_in); + n = i / j; + m = freq_in / j; + div--; + } else { + r = freq_out / freq_in; + n = 0; + m = 0; + div = 0; + } + if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2) + return -EINVAL; + } + + adau->pll_regs[0] = m >> 8; + adau->pll_regs[1] = m & 0xff; + adau->pll_regs[2] = n >> 8; + adau->pll_regs[3] = n & 0xff; + adau->pll_regs[4] = (r << 3) | (div << 1); + 
if (m != 0) + adau->pll_regs[4] |= 1; /* Fractional mode */ + + /* The PLL register is 6 bytes long and can only be written at once. */ + ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL, + adau->pll_regs, ARRAY_SIZE(adau->pll_regs)); + if (ret) + return ret; + + adau->pll_freq = freq_out; + + return 0; +} + +static int adau17x1_set_dai_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct adau *adau = snd_soc_codec_get_drvdata(dai->codec); + struct snd_soc_dapm_context *dapm = &dai->codec->dapm; + + switch (clk_id) { + case ADAU17X1_CLK_SRC_MCLK: + case ADAU17X1_CLK_SRC_PLL: + break; + default: + return -EINVAL; + } + + adau->sysclk = freq; + + if (adau->clk_src != clk_id) { + if (clk_id == ADAU17X1_CLK_SRC_PLL) { + snd_soc_dapm_add_routes(dapm, + &adau17x1_dapm_pll_route, 1); + } else { + snd_soc_dapm_del_routes(dapm, + &adau17x1_dapm_pll_route, 1); + } + } + + adau->clk_src = clk_id; + + return 0; +} + +static int adau17x1_set_dai_fmt(struct snd_soc_dai *dai, + unsigned int fmt) +{ + struct adau *adau = snd_soc_codec_get_drvdata(dai->codec); + unsigned int ctrl0, ctrl1; + int lrclk_pol; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + ctrl0 = ADAU17X1_SERIAL_PORT0_MASTER; + adau->master = true; + break; + case SND_SOC_DAIFMT_CBS_CFS: + ctrl0 = 0; + adau->master = false; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + lrclk_pol = 0; + ctrl1 = ADAU17X1_SERIAL_PORT1_DELAY1; + break; + case SND_SOC_DAIFMT_LEFT_J: + case SND_SOC_DAIFMT_RIGHT_J: + lrclk_pol = 1; + ctrl1 = ADAU17X1_SERIAL_PORT1_DELAY0; + break; + case SND_SOC_DAIFMT_DSP_A: + lrclk_pol = 1; + ctrl0 |= ADAU17X1_SERIAL_PORT0_PULSE_MODE; + ctrl1 = ADAU17X1_SERIAL_PORT1_DELAY1; + break; + case SND_SOC_DAIFMT_DSP_B: + lrclk_pol = 1; + ctrl0 |= ADAU17X1_SERIAL_PORT0_PULSE_MODE; + ctrl1 = ADAU17X1_SERIAL_PORT1_DELAY0; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_NF: + ctrl0 |= ADAU17X1_SERIAL_PORT0_BCLK_POL; + break; + case SND_SOC_DAIFMT_NB_IF: + lrclk_pol = !lrclk_pol; + break; + case SND_SOC_DAIFMT_IB_IF: + ctrl0 |= ADAU17X1_SERIAL_PORT0_BCLK_POL; + lrclk_pol = !lrclk_pol; + break; + default: + return -EINVAL; + } + + if (lrclk_pol) + ctrl0 |= ADAU17X1_SERIAL_PORT0_LRCLK_POL; + + regmap_write(adau->regmap, ADAU17X1_SERIAL_PORT0, ctrl0); + regmap_write(adau->regmap, ADAU17X1_SERIAL_PORT1, ctrl1); + + adau->dai_fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK; + + return 0; +} + +static int adau17x1_set_dai_tdm_slot(struct snd_soc_dai *dai, + unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) +{ + struct adau *adau = snd_soc_codec_get_drvdata(dai->codec); + unsigned int ser_ctrl0, ser_ctrl1; + unsigned int conv_ctrl0, conv_ctrl1; + + /* I2S mode */ + if (slots == 0) { + slots = 2; + rx_mask = 3; + tx_mask = 3; + slot_width = 32; + } + + switch (slots) { + case 2: + ser_ctrl0 = ADAU17X1_SERIAL_PORT0_STEREO; + break; + case 4: + ser_ctrl0 = ADAU17X1_SERIAL_PORT0_TDM4; + break; + case 8: + if (adau->type == ADAU1361) + return -EINVAL; + + ser_ctrl0 = ADAU17X1_SERIAL_PORT0_TDM8; + break; + default: + return -EINVAL; + } + + switch (slot_width * slots) { + case 32: + if (adau->type == ADAU1761) + return -EINVAL; + + ser_ctrl1 = ADAU17X1_SERIAL_PORT1_BCLK32; + break; + case 64: + ser_ctrl1 = ADAU17X1_SERIAL_PORT1_BCLK64; + break; + case 48: + ser_ctrl1 = ADAU17X1_SERIAL_PORT1_BCLK48; + 
break; + case 128: + ser_ctrl1 = ADAU17X1_SERIAL_PORT1_BCLK128; + break; + case 256: + if (adau->type == ADAU1361) + return -EINVAL; + + ser_ctrl1 = ADAU17X1_SERIAL_PORT1_BCLK256; + break; + default: + return -EINVAL; + } + + switch (rx_mask) { + case 0x03: + conv_ctrl1 = ADAU17X1_CONVERTER1_ADC_PAIR(1); + adau->tdm_slot[SNDRV_PCM_STREAM_CAPTURE] = 0; + break; + case 0x0c: + conv_ctrl1 = ADAU17X1_CONVERTER1_ADC_PAIR(2); + adau->tdm_slot[SNDRV_PCM_STREAM_CAPTURE] = 1; + break; + case 0x30: + conv_ctrl1 = ADAU17X1_CONVERTER1_ADC_PAIR(3); + adau->tdm_slot[SNDRV_PCM_STREAM_CAPTURE] = 2; + break; + case 0xc0: + conv_ctrl1 = ADAU17X1_CONVERTER1_ADC_PAIR(4); + adau->tdm_slot[SNDRV_PCM_STREAM_CAPTURE] = 3; + break; + default: + return -EINVAL; + } + + switch (tx_mask) { + case 0x03: + conv_ctrl0 = ADAU17X1_CONVERTER0_DAC_PAIR(1); + adau->tdm_slot[SNDRV_PCM_STREAM_PLAYBACK] = 0; + break; + case 0x0c: + conv_ctrl0 = ADAU17X1_CONVERTER0_DAC_PAIR(2); + adau->tdm_slot[SNDRV_PCM_STREAM_PLAYBACK] = 1; + break; + case 0x30: + conv_ctrl0 = ADAU17X1_CONVERTER0_DAC_PAIR(3); + adau->tdm_slot[SNDRV_PCM_STREAM_PLAYBACK] = 2; + break; + case 0xc0: + conv_ctrl0 = ADAU17X1_CONVERTER0_DAC_PAIR(4); + adau->tdm_slot[SNDRV_PCM_STREAM_PLAYBACK] = 3; + break; + default: + return -EINVAL; + } + + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, + ADAU17X1_CONVERTER0_DAC_PAIR_MASK, conv_ctrl0); + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER1, + ADAU17X1_CONVERTER1_ADC_PAIR_MASK, conv_ctrl1); + regmap_update_bits(adau->regmap, ADAU17X1_SERIAL_PORT0, + ADAU17X1_SERIAL_PORT0_TDM_MASK, ser_ctrl0); + regmap_update_bits(adau->regmap, ADAU17X1_SERIAL_PORT1, + ADAU17X1_SERIAL_PORT1_BCLK_MASK, ser_ctrl1); + + if (!adau17x1_has_dsp(adau)) + return 0; + + if (adau->dsp_bypass[SNDRV_PCM_STREAM_PLAYBACK]) { + regmap_write(adau->regmap, ADAU17X1_SERIAL_INPUT_ROUTE, + (adau->tdm_slot[SNDRV_PCM_STREAM_PLAYBACK] * 2) + 1); + } + + if (adau->dsp_bypass[SNDRV_PCM_STREAM_CAPTURE]) { + regmap_write(adau->regmap, ADAU17X1_SERIAL_OUTPUT_ROUTE, + (adau->tdm_slot[SNDRV_PCM_STREAM_CAPTURE] * 2) + 1); + } + + return 0; +} + +const struct snd_soc_dai_ops adau17x1_dai_ops = { + .hw_params = adau17x1_hw_params, + .set_sysclk = adau17x1_set_dai_sysclk, + .set_fmt = adau17x1_set_dai_fmt, + .set_pll = adau17x1_set_dai_pll, + .set_tdm_slot = adau17x1_set_dai_tdm_slot, +}; +EXPORT_SYMBOL_GPL(adau17x1_dai_ops); + +int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec, + enum adau17x1_micbias_voltage micbias) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + switch (micbias) { + case ADAU17X1_MICBIAS_0_90_AVDD: + case ADAU17X1_MICBIAS_0_65_AVDD: + break; + default: + return -EINVAL; + } + + return regmap_write(adau->regmap, ADAU17X1_MICBIAS, micbias << 2); +} +EXPORT_SYMBOL_GPL(adau17x1_set_micbias_voltage); + +bool adau17x1_readable_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ADAU17X1_CLOCK_CONTROL: + case ADAU17X1_PLL_CONTROL: + case ADAU17X1_REC_POWER_MGMT: + case ADAU17X1_MICBIAS: + case ADAU17X1_SERIAL_PORT0: + case ADAU17X1_SERIAL_PORT1: + case ADAU17X1_CONVERTER0: + case ADAU17X1_CONVERTER1: + case ADAU17X1_LEFT_INPUT_DIGITAL_VOL: + case ADAU17X1_RIGHT_INPUT_DIGITAL_VOL: + case ADAU17X1_ADC_CONTROL: + case ADAU17X1_PLAY_POWER_MGMT: + case ADAU17X1_DAC_CONTROL0: + case ADAU17X1_DAC_CONTROL1: + case ADAU17X1_DAC_CONTROL2: + case ADAU17X1_SERIAL_PORT_PAD: + case ADAU17X1_CONTROL_PORT_PAD0: + case ADAU17X1_CONTROL_PORT_PAD1: + case ADAU17X1_DSP_SAMPLING_RATE: + case ADAU17X1_SERIAL_INPUT_ROUTE: 
+ case ADAU17X1_SERIAL_OUTPUT_ROUTE: + case ADAU17X1_DSP_ENABLE: + case ADAU17X1_DSP_RUN: + case ADAU17X1_SERIAL_SAMPLING_RATE: + return true; + default: + break; + } + return false; +} +EXPORT_SYMBOL_GPL(adau17x1_readable_register); + +bool adau17x1_volatile_register(struct device *dev, unsigned int reg) +{ + /* SigmaDSP parameter and program memory */ + if (reg < 0x4000) + return true; + + switch (reg) { + /* The PLL register is 6 bytes long */ + case ADAU17X1_PLL_CONTROL: + case ADAU17X1_PLL_CONTROL + 1: + case ADAU17X1_PLL_CONTROL + 2: + case ADAU17X1_PLL_CONTROL + 3: + case ADAU17X1_PLL_CONTROL + 4: + case ADAU17X1_PLL_CONTROL + 5: + return true; + default: + break; + } + + return false; +} +EXPORT_SYMBOL_GPL(adau17x1_volatile_register); + +int adau17x1_load_firmware(struct adau *adau, struct device *dev, + const char *firmware) +{ + int ret; + int dspsr; + + ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); + if (ret) + return ret; + + regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); + regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); + + ret = process_sigma_firmware_regmap(dev, adau->regmap, firmware); + if (ret) { + regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); + return ret; + } + regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); + + return 0; +} +EXPORT_SYMBOL_GPL(adau17x1_load_firmware); + +int adau17x1_add_widgets(struct snd_soc_codec *codec) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + int ret; + + ret = snd_soc_add_codec_controls(codec, adau17x1_controls, + ARRAY_SIZE(adau17x1_controls)); + if (ret) + return ret; + ret = snd_soc_dapm_new_controls(&codec->dapm, adau17x1_dapm_widgets, + ARRAY_SIZE(adau17x1_dapm_widgets)); + if (ret) + return ret; + + if (adau17x1_has_dsp(adau)) { + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau17x1_dsp_dapm_widgets, + ARRAY_SIZE(adau17x1_dsp_dapm_widgets)); + } + return ret; +} +EXPORT_SYMBOL_GPL(adau17x1_add_widgets); + +int adau17x1_add_routes(struct snd_soc_codec *codec) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + int ret; + + ret = snd_soc_dapm_add_routes(&codec->dapm, adau17x1_dapm_routes, + ARRAY_SIZE(adau17x1_dapm_routes)); + if (ret) + return ret; + + if (adau17x1_has_dsp(adau)) { + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau17x1_dsp_dapm_routes, + ARRAY_SIZE(adau17x1_dsp_dapm_routes)); + } else { + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau17x1_no_dsp_dapm_routes, + ARRAY_SIZE(adau17x1_no_dsp_dapm_routes)); + } + return ret; +} +EXPORT_SYMBOL_GPL(adau17x1_add_routes); + +int adau17x1_suspend(struct snd_soc_codec *codec) +{ + codec->driver->set_bias_level(codec, SND_SOC_BIAS_OFF); + return 0; +} +EXPORT_SYMBOL_GPL(adau17x1_suspend); + +int adau17x1_resume(struct snd_soc_codec *codec) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + if (adau->switch_mode) + adau->switch_mode(codec->dev); + + codec->driver->set_bias_level(codec, SND_SOC_BIAS_STANDBY); + regcache_sync(adau->regmap); + + return 0; +} +EXPORT_SYMBOL_GPL(adau17x1_resume); + +int adau17x1_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)) +{ + struct adau *adau; + + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + adau = devm_kzalloc(dev, sizeof(*adau), GFP_KERNEL); + if (!adau) + return -ENOMEM; + + adau->regmap = regmap; + adau->switch_mode = switch_mode; + adau->type = type; + + dev_set_drvdata(dev, adau); + + if (switch_mode) + switch_mode(dev); + + return 0; +} 
+EXPORT_SYMBOL_GPL(adau17x1_probe); + +MODULE_DESCRIPTION("ASoC ADAU1X61/ADAU1X81 common code"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h new file mode 100644 index 000000000000..3ffabaf4c7a8 --- /dev/null +++ b/sound/soc/codecs/adau17x1.h @@ -0,0 +1,124 @@ +#ifndef __ADAU17X1_H__ +#define __ADAU17X1_H__ + +#include +#include + +enum adau17x1_type { + ADAU1361, + ADAU1761, + ADAU1381, + ADAU1781, +}; + +enum adau17x1_pll { + ADAU17X1_PLL, +}; + +enum adau17x1_pll_src { + ADAU17X1_PLL_SRC_MCLK, +}; + +enum adau17x1_clk_src { + ADAU17X1_CLK_SRC_MCLK, + ADAU17X1_CLK_SRC_PLL, +}; + +struct adau { + unsigned int sysclk; + unsigned int pll_freq; + + enum adau17x1_clk_src clk_src; + enum adau17x1_type type; + void (*switch_mode)(struct device *dev); + + unsigned int dai_fmt; + + uint8_t pll_regs[6]; + + bool master; + + unsigned int tdm_slot[2]; + bool dsp_bypass[2]; + + struct regmap *regmap; +}; + +int adau17x1_add_widgets(struct snd_soc_codec *codec); +int adau17x1_add_routes(struct snd_soc_codec *codec); +int adau17x1_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)); +int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec, + enum adau17x1_micbias_voltage micbias); +bool adau17x1_readable_register(struct device *dev, unsigned int reg); +bool adau17x1_volatile_register(struct device *dev, unsigned int reg); +int adau17x1_suspend(struct snd_soc_codec *codec); +int adau17x1_resume(struct snd_soc_codec *codec); + +extern const struct snd_soc_dai_ops adau17x1_dai_ops; + +int adau17x1_load_firmware(struct adau *adau, struct device *dev, + const char *firmware); +bool adau17x1_has_dsp(struct adau *adau); + +#define ADAU17X1_CLOCK_CONTROL 0x4000 +#define ADAU17X1_PLL_CONTROL 0x4002 +#define ADAU17X1_REC_POWER_MGMT 0x4009 +#define ADAU17X1_MICBIAS 0x4010 +#define ADAU17X1_SERIAL_PORT0 0x4015 +#define ADAU17X1_SERIAL_PORT1 0x4016 +#define ADAU17X1_CONVERTER0 0x4017 +#define ADAU17X1_CONVERTER1 0x4018 +#define ADAU17X1_LEFT_INPUT_DIGITAL_VOL 0x401a +#define ADAU17X1_RIGHT_INPUT_DIGITAL_VOL 0x401b +#define ADAU17X1_ADC_CONTROL 0x4019 +#define ADAU17X1_PLAY_POWER_MGMT 0x4029 +#define ADAU17X1_DAC_CONTROL0 0x402a +#define ADAU17X1_DAC_CONTROL1 0x402b +#define ADAU17X1_DAC_CONTROL2 0x402c +#define ADAU17X1_SERIAL_PORT_PAD 0x402d +#define ADAU17X1_CONTROL_PORT_PAD0 0x402f +#define ADAU17X1_CONTROL_PORT_PAD1 0x4030 +#define ADAU17X1_DSP_SAMPLING_RATE 0x40eb +#define ADAU17X1_SERIAL_INPUT_ROUTE 0x40f2 +#define ADAU17X1_SERIAL_OUTPUT_ROUTE 0x40f3 +#define ADAU17X1_DSP_ENABLE 0x40f5 +#define ADAU17X1_DSP_RUN 0x40f6 +#define ADAU17X1_SERIAL_SAMPLING_RATE 0x40f8 + +#define ADAU17X1_SERIAL_PORT0_BCLK_POL BIT(4) +#define ADAU17X1_SERIAL_PORT0_LRCLK_POL BIT(3) +#define ADAU17X1_SERIAL_PORT0_MASTER BIT(0) + +#define ADAU17X1_SERIAL_PORT1_DELAY1 0x00 +#define ADAU17X1_SERIAL_PORT1_DELAY0 0x01 +#define ADAU17X1_SERIAL_PORT1_DELAY8 0x02 +#define ADAU17X1_SERIAL_PORT1_DELAY16 0x03 +#define ADAU17X1_SERIAL_PORT1_DELAY_MASK 0x03 + +#define ADAU17X1_CLOCK_CONTROL_INFREQ_MASK 0x6 +#define ADAU17X1_CLOCK_CONTROL_CORECLK_SRC_PLL BIT(3) +#define ADAU17X1_CLOCK_CONTROL_SYSCLK_EN BIT(0) + +#define ADAU17X1_SERIAL_PORT1_BCLK32 (0x0 << 5) +#define ADAU17X1_SERIAL_PORT1_BCLK48 (0x1 << 5) +#define ADAU17X1_SERIAL_PORT1_BCLK64 (0x2 << 5) +#define ADAU17X1_SERIAL_PORT1_BCLK128 (0x3 << 5) +#define ADAU17X1_SERIAL_PORT1_BCLK256 (0x4 << 5) +#define 
ADAU17X1_SERIAL_PORT1_BCLK_MASK (0x7 << 5) + +#define ADAU17X1_SERIAL_PORT0_STEREO (0x0 << 1) +#define ADAU17X1_SERIAL_PORT0_TDM4 (0x1 << 1) +#define ADAU17X1_SERIAL_PORT0_TDM8 (0x2 << 1) +#define ADAU17X1_SERIAL_PORT0_TDM_MASK (0x3 << 1) +#define ADAU17X1_SERIAL_PORT0_PULSE_MODE BIT(5) + +#define ADAU17X1_CONVERTER0_DAC_PAIR(x) (((x) - 1) << 5) +#define ADAU17X1_CONVERTER0_DAC_PAIR_MASK (0x3 << 5) +#define ADAU17X1_CONVERTER1_ADC_PAIR(x) ((x) - 1) +#define ADAU17X1_CONVERTER1_ADC_PAIR_MASK 0x3 + +#define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7 + + +#endif -- cgit v1.2.3 From dab464b60b2435a2aaae3630266db8ad130b7fad Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 27 May 2014 10:53:18 +0200 Subject: ASoC: Add ADAU1361/ADAU1761 audio CODEC support This patch adds support for the Analog Devices ADAU1361 and ADAU1761 CODECs. The device is a low-power, 24-bit stereo audio CODEC with multiple analog inputs and outputs, one digital microphone input and an I2S interface. The device can be controlled either via I2C or SPI. The main difference between the two variants is that the ADAU1761 has a built-in SigmaDSP, while the ADAU1361 has not. Signed-off-by: Lars-Peter Clausen Signed-off-by: Mark Brown --- include/linux/platform_data/adau17x1.h | 67 +++ sound/soc/codecs/Kconfig | 16 + sound/soc/codecs/Makefile | 6 + sound/soc/codecs/adau1761-i2c.c | 60 +++ sound/soc/codecs/adau1761-spi.c | 77 ++++ sound/soc/codecs/adau1761.c | 803 +++++++++++++++++++++++++++++++++ sound/soc/codecs/adau1761.h | 23 + 7 files changed, 1052 insertions(+) create mode 100644 sound/soc/codecs/adau1761-i2c.c create mode 100644 sound/soc/codecs/adau1761-spi.c create mode 100644 sound/soc/codecs/adau1761.c create mode 100644 sound/soc/codecs/adau1761.h (limited to 'include/linux') diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h index f90bd9286f31..d234d9e46fd6 100644 --- a/include/linux/platform_data/adau17x1.h +++ b/include/linux/platform_data/adau17x1.h @@ -20,4 +20,71 @@ enum adau17x1_micbias_voltage { ADAU17X1_MICBIAS_0_65_AVDD = 1, }; +/** + * enum adau1761_digmic_jackdet_pin_mode - Configuration of the JACKDET/MICIN pin + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: Disable the pin + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: Configure the pin for usage as + * digital microphone input. + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: Configure the pin for jack + * insertion detection.
+ */ +enum adau1761_digmic_jackdet_pin_mode { + ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE, + ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC, + ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT, +}; + +/** + * enum adau1761_jackdetect_debounce_time - Jack insertion detection debounce time + * @ADAU1761_JACKDETECT_DEBOUNCE_5MS: 5 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_10MS: 10 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_20MS: 20 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_40MS: 40 milliseconds + */ +enum adau1761_jackdetect_debounce_time { + ADAU1761_JACKDETECT_DEBOUNCE_5MS = 0, + ADAU1761_JACKDETECT_DEBOUNCE_10MS = 1, + ADAU1761_JACKDETECT_DEBOUNCE_20MS = 2, + ADAU1761_JACKDETECT_DEBOUNCE_40MS = 3, +}; + +/** + * enum adau1761_output_mode - Output mode configuration + * @ADAU1761_OUTPUT_MODE_HEADPHONE: Headphone output + * @ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: Capless headphone output + * @ADAU1761_OUTPUT_MODE_LINE: Line output + */ +enum adau1761_output_mode { + ADAU1761_OUTPUT_MODE_HEADPHONE, + ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS, + ADAU1761_OUTPUT_MODE_LINE, +}; + +/** + * struct adau1761_platform_data - ADAU1761 Codec driver platform data + * @input_differential: If true the input pins will be configured in + * differential mode. + * @lineout_mode: Output mode for the LOUT/ROUT pins + * @headphone_mode: Output mode for the LHP/RHP pins + * @digmic_jackdetect_pin_mode: JACKDET/MICIN pin configuration + * @jackdetect_debounce_time: Jack insertion detection debounce time. + * Note: This value will only be used if the JACKDET/MICIN pin is configured + * for jack insertion detection. + * @jackdetect_active_low: If true the jack insertion detection is active low. + * Otherwise it will be active high. + * @micbias_voltage: Microphone voltage bias + */ +struct adau1761_platform_data { + bool input_differential; + enum adau1761_output_mode lineout_mode; + enum adau1761_output_mode headphone_mode; + + enum adau1761_digmic_jackdet_pin_mode digmic_jackdetect_pin_mode; + + enum adau1761_jackdetect_debounce_time jackdetect_debounce_time; + bool jackdetect_active_low; + + enum adau17x1_micbias_voltage micbias_voltage; +}; + #endif diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 182a39751a91..4233ed118c48 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -23,6 +23,8 @@ config SND_SOC_ALL_CODECS select SND_SOC_AD1980 if SND_SOC_AC97_BUS select SND_SOC_AD73311 select SND_SOC_ADAU1373 if I2C + select SND_SOC_ADAU1761_I2C if I2C + select SND_SOC_ADAU1761_SPI if SPI select SND_SOC_ADAV801 if SPI_MASTER select SND_SOC_ADAV803 if I2C select SND_SOC_ADAU1977_SPI if SPI_MASTER @@ -222,6 +224,20 @@ config SND_SOC_ADAU17X1 tristate select SND_SOC_SIGMADSP +config SND_SOC_ADAU1761 + tristate + select SND_SOC_ADAU17X1 + +config SND_SOC_ADAU1761_I2C + tristate + select SND_SOC_ADAU1761 + select REGMAP_I2C + +config SND_SOC_ADAU1761_SPI + tristate + select SND_SOC_ADAU1761 + select REGMAP_SPI + config SND_SOC_ADAU1977 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index a8cba3086830..e96499ca76bd 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -10,6 +10,9 @@ snd-soc-ad73311-objs := ad73311.o snd-soc-adau1373-objs := adau1373.o snd-soc-adau1701-objs := adau1701.o snd-soc-adau17x1-objs := adau17x1.o +snd-soc-adau1761-objs := adau1761.o +snd-soc-adau1761-i2c-objs := adau1761-i2c.o +snd-soc-adau1761-spi-objs := adau1761-spi.o snd-soc-adau1977-objs := adau1977.o snd-soc-adau1977-spi-objs := adau1977-spi.o
snd-soc-adau1977-i2c-objs := adau1977-i2c.o @@ -160,6 +163,9 @@ obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o obj-$(CONFIG_SND_SOC_ADAU1373) += snd-soc-adau1373.o obj-$(CONFIG_SND_SOC_ADAU1701) += snd-soc-adau1701.o obj-$(CONFIG_SND_SOC_ADAU17X1) += snd-soc-adau17x1.o +obj-$(CONFIG_SND_SOC_ADAU1761) += snd-soc-adau1761.o +obj-$(CONFIG_SND_SOC_ADAU1761_I2C) += snd-soc-adau1761-i2c.o +obj-$(CONFIG_SND_SOC_ADAU1761_SPI) += snd-soc-adau1761-spi.o obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c new file mode 100644 index 000000000000..862796dec693 --- /dev/null +++ b/sound/soc/codecs/adau1761-i2c.c @@ -0,0 +1,60 @@ +/* + * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961 codec + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#include +#include +#include +#include +#include + +#include "adau1761.h" + +static int adau1761_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct regmap_config config; + + config = adau1761_regmap_config; + config.val_bits = 8; + config.reg_bits = 16; + + return adau1761_probe(&client->dev, + devm_regmap_init_i2c(client, &config), + id->driver_data, NULL); +} + +static int adau1761_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + return 0; +} + +static const struct i2c_device_id adau1761_i2c_ids[] = { + { "adau1361", ADAU1361 }, + { "adau1461", ADAU1761 }, + { "adau1761", ADAU1761 }, + { "adau1961", ADAU1361 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adau1761_i2c_ids); + +static struct i2c_driver adau1761_i2c_driver = { + .driver = { + .name = "adau1761", + .owner = THIS_MODULE, + }, + .probe = adau1761_i2c_probe, + .remove = adau1761_i2c_remove, + .id_table = adau1761_i2c_ids, +}; +module_i2c_driver(adau1761_i2c_driver); + +MODULE_DESCRIPTION("ASoC ADAU1361/ADAU1461/ADAU1761/ADAU1961 CODEC I2C driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1761-spi.c b/sound/soc/codecs/adau1761-spi.c new file mode 100644 index 000000000000..cce2f11f1ffb --- /dev/null +++ b/sound/soc/codecs/adau1761-spi.c @@ -0,0 +1,77 @@ +/* + * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961 codec + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#include +#include +#include +#include +#include + +#include "adau1761.h" + +static void adau1761_spi_switch_mode(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + /* + * To get the device into SPI mode CLATCH has to be pulled low three + * times. Do this by issuing three dummy reads. 
+ */ + spi_w8r8(spi, 0x00); + spi_w8r8(spi, 0x00); + spi_w8r8(spi, 0x00); +} + +static int adau1761_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct regmap_config config; + + if (!id) + return -EINVAL; + + config = adau1761_regmap_config; + config.val_bits = 8; + config.reg_bits = 24; + config.read_flag_mask = 0x1; + + return adau1761_probe(&spi->dev, + devm_regmap_init_spi(spi, &config), + id->driver_data, adau1761_spi_switch_mode); +} + +static int adau1761_spi_remove(struct spi_device *spi) +{ + snd_soc_unregister_codec(&spi->dev); + return 0; +} + +static const struct spi_device_id adau1761_spi_id[] = { + { "adau1361", ADAU1361 }, + { "adau1461", ADAU1761 }, + { "adau1761", ADAU1761 }, + { "adau1961", ADAU1361 }, + { } +}; +MODULE_DEVICE_TABLE(spi, adau1761_spi_id); + +static struct spi_driver adau1761_spi_driver = { + .driver = { + .name = "adau1761", + .owner = THIS_MODULE, + }, + .probe = adau1761_spi_probe, + .remove = adau1761_spi_remove, + .id_table = adau1761_spi_id, +}; +module_spi_driver(adau1761_spi_driver); + +MODULE_DESCRIPTION("ASoC ADAU1361/ADAU1461/ADAU1761/ADAU1961 CODEC SPI driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c new file mode 100644 index 000000000000..848cab839553 --- /dev/null +++ b/sound/soc/codecs/adau1761.c @@ -0,0 +1,803 @@ +/* + * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961 codec + * + * Copyright 2011-2013 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adau17x1.h" +#include "adau1761.h" + +#define ADAU1761_DIGMIC_JACKDETECT 0x4008 +#define ADAU1761_REC_MIXER_LEFT0 0x400a +#define ADAU1761_REC_MIXER_LEFT1 0x400b +#define ADAU1761_REC_MIXER_RIGHT0 0x400c +#define ADAU1761_REC_MIXER_RIGHT1 0x400d +#define ADAU1761_LEFT_DIFF_INPUT_VOL 0x400e +#define ADAU1761_RIGHT_DIFF_INPUT_VOL 0x400f +#define ADAU1761_PLAY_LR_MIXER_LEFT 0x4020 +#define ADAU1761_PLAY_MIXER_LEFT0 0x401c +#define ADAU1761_PLAY_MIXER_LEFT1 0x401d +#define ADAU1761_PLAY_MIXER_RIGHT0 0x401e +#define ADAU1761_PLAY_MIXER_RIGHT1 0x401f +#define ADAU1761_PLAY_LR_MIXER_RIGHT 0x4021 +#define ADAU1761_PLAY_MIXER_MONO 0x4022 +#define ADAU1761_PLAY_HP_LEFT_VOL 0x4023 +#define ADAU1761_PLAY_HP_RIGHT_VOL 0x4024 +#define ADAU1761_PLAY_LINE_LEFT_VOL 0x4025 +#define ADAU1761_PLAY_LINE_RIGHT_VOL 0x4026 +#define ADAU1761_PLAY_MONO_OUTPUT_VOL 0x4027 +#define ADAU1761_POP_CLICK_SUPPRESS 0x4028 +#define ADAU1761_JACK_DETECT_PIN 0x4031 +#define ADAU1761_DEJITTER 0x4036 +#define ADAU1761_CLK_ENABLE0 0x40f9 +#define ADAU1761_CLK_ENABLE1 0x40fa + +#define ADAU1761_DIGMIC_JACKDETECT_ACTIVE_LOW BIT(0) +#define ADAU1761_DIGMIC_JACKDETECT_DIGMIC BIT(5) + +#define ADAU1761_DIFF_INPUT_VOL_LDEN BIT(0) + +#define ADAU1761_PLAY_MONO_OUTPUT_VOL_MODE_HP BIT(0) +#define ADAU1761_PLAY_MONO_OUTPUT_VOL_UNMUTE BIT(1) + +#define ADAU1761_PLAY_HP_RIGHT_VOL_MODE_HP BIT(0) + +#define ADAU1761_PLAY_LINE_LEFT_VOL_MODE_HP BIT(0) + +#define ADAU1761_PLAY_LINE_RIGHT_VOL_MODE_HP BIT(0) + + +#define ADAU1761_FIRMWARE "adau1761.bin" + +static const struct reg_default adau1761_reg_defaults[] = { + { ADAU1761_DEJITTER, 0x03 }, + { ADAU1761_DIGMIC_JACKDETECT, 0x00 }, + { ADAU1761_REC_MIXER_LEFT0, 0x00 }, + { ADAU1761_REC_MIXER_LEFT1, 0x00 }, + { ADAU1761_REC_MIXER_RIGHT0, 0x00 }, + { ADAU1761_REC_MIXER_RIGHT1, 0x00 }, + 
{ ADAU1761_LEFT_DIFF_INPUT_VOL, 0x00 }, + { ADAU1761_RIGHT_DIFF_INPUT_VOL, 0x00 }, + { ADAU1761_PLAY_LR_MIXER_LEFT, 0x00 }, + { ADAU1761_PLAY_MIXER_LEFT0, 0x00 }, + { ADAU1761_PLAY_MIXER_LEFT1, 0x00 }, + { ADAU1761_PLAY_MIXER_RIGHT0, 0x00 }, + { ADAU1761_PLAY_MIXER_RIGHT1, 0x00 }, + { ADAU1761_PLAY_LR_MIXER_RIGHT, 0x00 }, + { ADAU1761_PLAY_MIXER_MONO, 0x00 }, + { ADAU1761_PLAY_HP_LEFT_VOL, 0x00 }, + { ADAU1761_PLAY_HP_RIGHT_VOL, 0x00 }, + { ADAU1761_PLAY_LINE_LEFT_VOL, 0x00 }, + { ADAU1761_PLAY_LINE_RIGHT_VOL, 0x00 }, + { ADAU1761_PLAY_MONO_OUTPUT_VOL, 0x00 }, + { ADAU1761_POP_CLICK_SUPPRESS, 0x00 }, + { ADAU1761_JACK_DETECT_PIN, 0x00 }, + { ADAU1761_CLK_ENABLE0, 0x00 }, + { ADAU1761_CLK_ENABLE1, 0x00 }, + { ADAU17X1_CLOCK_CONTROL, 0x00 }, + { ADAU17X1_PLL_CONTROL, 0x00 }, + { ADAU17X1_REC_POWER_MGMT, 0x00 }, + { ADAU17X1_MICBIAS, 0x00 }, + { ADAU17X1_SERIAL_PORT0, 0x00 }, + { ADAU17X1_SERIAL_PORT1, 0x00 }, + { ADAU17X1_CONVERTER0, 0x00 }, + { ADAU17X1_CONVERTER1, 0x00 }, + { ADAU17X1_LEFT_INPUT_DIGITAL_VOL, 0x00 }, + { ADAU17X1_RIGHT_INPUT_DIGITAL_VOL, 0x00 }, + { ADAU17X1_ADC_CONTROL, 0x00 }, + { ADAU17X1_PLAY_POWER_MGMT, 0x00 }, + { ADAU17X1_DAC_CONTROL0, 0x00 }, + { ADAU17X1_DAC_CONTROL1, 0x00 }, + { ADAU17X1_DAC_CONTROL2, 0x00 }, + { ADAU17X1_SERIAL_PORT_PAD, 0xaa }, + { ADAU17X1_CONTROL_PORT_PAD0, 0xaa }, + { ADAU17X1_CONTROL_PORT_PAD1, 0x00 }, + { ADAU17X1_DSP_SAMPLING_RATE, 0x01 }, + { ADAU17X1_SERIAL_INPUT_ROUTE, 0x00 }, + { ADAU17X1_SERIAL_OUTPUT_ROUTE, 0x00 }, + { ADAU17X1_DSP_ENABLE, 0x00 }, + { ADAU17X1_DSP_RUN, 0x00 }, + { ADAU17X1_SERIAL_SAMPLING_RATE, 0x00 }, +}; + +static const DECLARE_TLV_DB_SCALE(adau1761_sing_in_tlv, -1500, 300, 1); +static const DECLARE_TLV_DB_SCALE(adau1761_diff_in_tlv, -1200, 75, 0); +static const DECLARE_TLV_DB_SCALE(adau1761_out_tlv, -5700, 100, 0); +static const DECLARE_TLV_DB_SCALE(adau1761_sidetone_tlv, -1800, 300, 1); +static const DECLARE_TLV_DB_SCALE(adau1761_boost_tlv, -600, 600, 1); +static const DECLARE_TLV_DB_SCALE(adau1761_pga_boost_tlv, -2000, 2000, 1); + +static const unsigned int adau1761_bias_select_values[] = { + 0, 2, 3, +}; + +static const char * const adau1761_bias_select_text[] = { + "Normal operation", "Enhanced performance", "Power saving", +}; + +static const char * const adau1761_bias_select_extreme_text[] = { + "Normal operation", "Extreme power saving", "Enhanced performance", + "Power saving", +}; + +static SOC_ENUM_SINGLE_DECL(adau1761_adc_bias_enum, + ADAU17X1_REC_POWER_MGMT, 3, adau1761_bias_select_extreme_text); +static SOC_ENUM_SINGLE_DECL(adau1761_hp_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 6, adau1761_bias_select_extreme_text); +static SOC_ENUM_SINGLE_DECL(adau1761_dac_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 4, adau1761_bias_select_extreme_text); +static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_playback_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 2, 0x3, adau1761_bias_select_text, + adau1761_bias_select_values); +static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_capture_bias_enum, + ADAU17X1_REC_POWER_MGMT, 1, 0x3, adau1761_bias_select_text, + adau1761_bias_select_values); + +static const struct snd_kcontrol_new adau1761_jack_detect_controls[] = { + SOC_SINGLE("Speaker Auto-mute Switch", ADAU1761_DIGMIC_JACKDETECT, + 4, 1, 0), +}; + +static const struct snd_kcontrol_new adau1761_differential_mode_controls[] = { + SOC_DOUBLE_R_TLV("Capture Volume", ADAU1761_LEFT_DIFF_INPUT_VOL, + ADAU1761_RIGHT_DIFF_INPUT_VOL, 2, 0x3f, 0, + adau1761_diff_in_tlv), + SOC_DOUBLE_R("Capture Switch", ADAU1761_LEFT_DIFF_INPUT_VOL, + 
ADAU1761_RIGHT_DIFF_INPUT_VOL, 1, 1, 0), + + SOC_DOUBLE_R_TLV("PGA Boost Capture Volume", ADAU1761_REC_MIXER_LEFT1, + ADAU1761_REC_MIXER_RIGHT1, 3, 2, 0, adau1761_pga_boost_tlv), +}; + +static const struct snd_kcontrol_new adau1761_single_mode_controls[] = { + SOC_SINGLE_TLV("Input 1 Capture Volume", ADAU1761_REC_MIXER_LEFT0, + 4, 7, 0, adau1761_sing_in_tlv), + SOC_SINGLE_TLV("Input 2 Capture Volume", ADAU1761_REC_MIXER_LEFT0, + 1, 7, 0, adau1761_sing_in_tlv), + SOC_SINGLE_TLV("Input 3 Capture Volume", ADAU1761_REC_MIXER_RIGHT0, + 4, 7, 0, adau1761_sing_in_tlv), + SOC_SINGLE_TLV("Input 4 Capture Volume", ADAU1761_REC_MIXER_RIGHT0, + 1, 7, 0, adau1761_sing_in_tlv), +}; + +static const struct snd_kcontrol_new adau1761_controls[] = { + SOC_DOUBLE_R_TLV("Aux Capture Volume", ADAU1761_REC_MIXER_LEFT1, + ADAU1761_REC_MIXER_RIGHT1, 0, 7, 0, adau1761_sing_in_tlv), + + SOC_DOUBLE_R_TLV("Headphone Playback Volume", ADAU1761_PLAY_HP_LEFT_VOL, + ADAU1761_PLAY_HP_RIGHT_VOL, 2, 0x3f, 0, adau1761_out_tlv), + SOC_DOUBLE_R("Headphone Playback Switch", ADAU1761_PLAY_HP_LEFT_VOL, + ADAU1761_PLAY_HP_RIGHT_VOL, 1, 1, 0), + SOC_DOUBLE_R_TLV("Lineout Playback Volume", ADAU1761_PLAY_LINE_LEFT_VOL, + ADAU1761_PLAY_LINE_RIGHT_VOL, 2, 0x3f, 0, adau1761_out_tlv), + SOC_DOUBLE_R("Lineout Playback Switch", ADAU1761_PLAY_LINE_LEFT_VOL, + ADAU1761_PLAY_LINE_RIGHT_VOL, 1, 1, 0), + + SOC_ENUM("ADC Bias", adau1761_adc_bias_enum), + SOC_ENUM("DAC Bias", adau1761_dac_bias_enum), + SOC_ENUM("Capture Bias", adau1761_capture_bias_enum), + SOC_ENUM("Playback Bias", adau1761_playback_bias_enum), + SOC_ENUM("Headphone Bias", adau1761_hp_bias_enum), +}; + +static const struct snd_kcontrol_new adau1761_mono_controls[] = { + SOC_SINGLE_TLV("Mono Playback Volume", ADAU1761_PLAY_MONO_OUTPUT_VOL, + 2, 0x3f, 0, adau1761_out_tlv), + SOC_SINGLE("Mono Playback Switch", ADAU1761_PLAY_MONO_OUTPUT_VOL, + 1, 1, 0), +}; + +static const struct snd_kcontrol_new adau1761_left_mixer_controls[] = { + SOC_DAPM_SINGLE_AUTODISABLE("Left DAC Switch", + ADAU1761_PLAY_MIXER_LEFT0, 5, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("Right DAC Switch", + ADAU1761_PLAY_MIXER_LEFT0, 6, 1, 0), + SOC_DAPM_SINGLE_TLV("Aux Bypass Volume", + ADAU1761_PLAY_MIXER_LEFT0, 1, 8, 0, adau1761_sidetone_tlv), + SOC_DAPM_SINGLE_TLV("Right Bypass Volume", + ADAU1761_PLAY_MIXER_LEFT1, 4, 8, 0, adau1761_sidetone_tlv), + SOC_DAPM_SINGLE_TLV("Left Bypass Volume", + ADAU1761_PLAY_MIXER_LEFT1, 0, 8, 0, adau1761_sidetone_tlv), +}; + +static const struct snd_kcontrol_new adau1761_right_mixer_controls[] = { + SOC_DAPM_SINGLE_AUTODISABLE("Left DAC Switch", + ADAU1761_PLAY_MIXER_RIGHT0, 5, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("Right DAC Switch", + ADAU1761_PLAY_MIXER_RIGHT0, 6, 1, 0), + SOC_DAPM_SINGLE_TLV("Aux Bypass Volume", + ADAU1761_PLAY_MIXER_RIGHT0, 1, 8, 0, adau1761_sidetone_tlv), + SOC_DAPM_SINGLE_TLV("Right Bypass Volume", + ADAU1761_PLAY_MIXER_RIGHT1, 4, 8, 0, adau1761_sidetone_tlv), + SOC_DAPM_SINGLE_TLV("Left Bypass Volume", + ADAU1761_PLAY_MIXER_RIGHT1, 0, 8, 0, adau1761_sidetone_tlv), +}; + +static const struct snd_kcontrol_new adau1761_left_lr_mixer_controls[] = { + SOC_DAPM_SINGLE_TLV("Left Volume", + ADAU1761_PLAY_LR_MIXER_LEFT, 1, 2, 0, adau1761_boost_tlv), + SOC_DAPM_SINGLE_TLV("Right Volume", + ADAU1761_PLAY_LR_MIXER_LEFT, 3, 2, 0, adau1761_boost_tlv), +}; + +static const struct snd_kcontrol_new adau1761_right_lr_mixer_controls[] = { + SOC_DAPM_SINGLE_TLV("Left Volume", + ADAU1761_PLAY_LR_MIXER_RIGHT, 1, 2, 0, adau1761_boost_tlv), + SOC_DAPM_SINGLE_TLV("Right Volume", + 
ADAU1761_PLAY_LR_MIXER_RIGHT, 3, 2, 0, adau1761_boost_tlv), +}; + +static const char * const adau1761_input_mux_text[] = { + "ADC", "DMIC", +}; + +static SOC_ENUM_SINGLE_DECL(adau1761_input_mux_enum, + ADAU17X1_ADC_CONTROL, 2, adau1761_input_mux_text); + +static const struct snd_kcontrol_new adau1761_input_mux_control = + SOC_DAPM_ENUM("Input Select", adau1761_input_mux_enum); + +static int adau1761_dejitter_fixup(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct adau *adau = snd_soc_codec_get_drvdata(w->codec); + + /* After any power changes have been made the dejitter circuit + * has to be reinitialized. */ + regmap_write(adau->regmap, ADAU1761_DEJITTER, 0); + if (!adau->master) + regmap_write(adau->regmap, ADAU1761_DEJITTER, 3); + + return 0; +} + +static const struct snd_soc_dapm_widget adau1x61_dapm_widgets[] = { + SND_SOC_DAPM_MIXER("Left Input Mixer", ADAU1761_REC_MIXER_LEFT0, 0, 0, + NULL, 0), + SND_SOC_DAPM_MIXER("Right Input Mixer", ADAU1761_REC_MIXER_RIGHT0, 0, 0, + NULL, 0), + + SOC_MIXER_ARRAY("Left Playback Mixer", ADAU1761_PLAY_MIXER_LEFT0, + 0, 0, adau1761_left_mixer_controls), + SOC_MIXER_ARRAY("Right Playback Mixer", ADAU1761_PLAY_MIXER_RIGHT0, + 0, 0, adau1761_right_mixer_controls), + SOC_MIXER_ARRAY("Left LR Playback Mixer", ADAU1761_PLAY_LR_MIXER_LEFT, + 0, 0, adau1761_left_lr_mixer_controls), + SOC_MIXER_ARRAY("Right LR Playback Mixer", ADAU1761_PLAY_LR_MIXER_RIGHT, + 0, 0, adau1761_right_lr_mixer_controls), + + SND_SOC_DAPM_SUPPLY("Headphone", ADAU1761_PLAY_HP_LEFT_VOL, + 0, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY_S("SYSCLK", 2, SND_SOC_NOPM, 0, 0, NULL, 0), + + SND_SOC_DAPM_POST("Dejitter fixup", adau1761_dejitter_fixup), + + SND_SOC_DAPM_INPUT("LAUX"), + SND_SOC_DAPM_INPUT("RAUX"), + SND_SOC_DAPM_INPUT("LINP"), + SND_SOC_DAPM_INPUT("LINN"), + SND_SOC_DAPM_INPUT("RINP"), + SND_SOC_DAPM_INPUT("RINN"), + + SND_SOC_DAPM_OUTPUT("LOUT"), + SND_SOC_DAPM_OUTPUT("ROUT"), + SND_SOC_DAPM_OUTPUT("LHP"), + SND_SOC_DAPM_OUTPUT("RHP"), +}; + +static const struct snd_soc_dapm_widget adau1761_mono_dapm_widgets[] = { + SND_SOC_DAPM_MIXER("Mono Playback Mixer", ADAU1761_PLAY_MIXER_MONO, + 0, 0, NULL, 0), + + SND_SOC_DAPM_OUTPUT("MONOOUT"), +}; + +static const struct snd_soc_dapm_widget adau1761_capless_dapm_widgets[] = { + SND_SOC_DAPM_SUPPLY_S("Headphone VGND", 1, ADAU1761_PLAY_MIXER_MONO, + 0, 0, NULL, 0), +}; + +static const struct snd_soc_dapm_route adau1x61_dapm_routes[] = { + { "Left Input Mixer", NULL, "LINP" }, + { "Left Input Mixer", NULL, "LINN" }, + { "Left Input Mixer", NULL, "LAUX" }, + + { "Right Input Mixer", NULL, "RINP" }, + { "Right Input Mixer", NULL, "RINN" }, + { "Right Input Mixer", NULL, "RAUX" }, + + { "Left Playback Mixer", NULL, "Left Playback Enable"}, + { "Right Playback Mixer", NULL, "Right Playback Enable"}, + { "Left LR Playback Mixer", NULL, "Left Playback Enable"}, + { "Right LR Playback Mixer", NULL, "Right Playback Enable"}, + + { "Left Playback Mixer", "Left DAC Switch", "Left DAC" }, + { "Left Playback Mixer", "Right DAC Switch", "Right DAC" }, + + { "Right Playback Mixer", "Left DAC Switch", "Left DAC" }, + { "Right Playback Mixer", "Right DAC Switch", "Right DAC" }, + + { "Left LR Playback Mixer", "Left Volume", "Left Playback Mixer" }, + { "Left LR Playback Mixer", "Right Volume", "Right Playback Mixer" }, + + { "Right LR Playback Mixer", "Left Volume", "Left Playback Mixer" }, + { "Right LR Playback Mixer", "Right Volume", "Right Playback Mixer" }, + + { "LHP", NULL, "Left Playback Mixer" }, + { "RHP", NULL, 
"Right Playback Mixer" }, + + { "LHP", NULL, "Headphone" }, + { "RHP", NULL, "Headphone" }, + + { "LOUT", NULL, "Left LR Playback Mixer" }, + { "ROUT", NULL, "Right LR Playback Mixer" }, + + { "Left Playback Mixer", "Aux Bypass Volume", "LAUX" }, + { "Left Playback Mixer", "Left Bypass Volume", "Left Input Mixer" }, + { "Left Playback Mixer", "Right Bypass Volume", "Right Input Mixer" }, + { "Right Playback Mixer", "Aux Bypass Volume", "RAUX" }, + { "Right Playback Mixer", "Left Bypass Volume", "Left Input Mixer" }, + { "Right Playback Mixer", "Right Bypass Volume", "Right Input Mixer" }, +}; + +static const struct snd_soc_dapm_route adau1761_mono_dapm_routes[] = { + { "Mono Playback Mixer", NULL, "Left Playback Mixer" }, + { "Mono Playback Mixer", NULL, "Right Playback Mixer" }, + + { "MONOOUT", NULL, "Mono Playback Mixer" }, +}; + +static const struct snd_soc_dapm_route adau1761_capless_dapm_routes[] = { + { "Headphone", NULL, "Headphone VGND" }, +}; + +static const struct snd_soc_dapm_widget adau1761_dmic_widgets[] = { + SND_SOC_DAPM_MUX("Left Decimator Mux", SND_SOC_NOPM, 0, 0, + &adau1761_input_mux_control), + SND_SOC_DAPM_MUX("Right Decimator Mux", SND_SOC_NOPM, 0, 0, + &adau1761_input_mux_control), + + SND_SOC_DAPM_INPUT("DMIC"), +}; + +static const struct snd_soc_dapm_route adau1761_dmic_routes[] = { + { "Left Decimator Mux", "ADC", "Left Input Mixer" }, + { "Left Decimator Mux", "DMIC", "DMIC" }, + { "Right Decimator Mux", "ADC", "Right Input Mixer" }, + { "Right Decimator Mux", "DMIC", "DMIC" }, + + { "Left Decimator", NULL, "Left Decimator Mux" }, + { "Right Decimator", NULL, "Right Decimator Mux" }, +}; + +static const struct snd_soc_dapm_route adau1761_no_dmic_routes[] = { + { "Left Decimator", NULL, "Left Input Mixer" }, + { "Right Decimator", NULL, "Right Input Mixer" }, +}; + +static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = { + SND_SOC_DAPM_SUPPLY("Serial Port Clock", ADAU1761_CLK_ENABLE0, + 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Serial Input Routing Clock", ADAU1761_CLK_ENABLE0, + 1, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Serial Output Routing Clock", ADAU1761_CLK_ENABLE0, + 3, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("Decimator Resync Clock", ADAU1761_CLK_ENABLE0, + 4, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Interpolator Resync Clock", ADAU1761_CLK_ENABLE0, + 2, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1, + 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY_S("Digital Clock 1", 1, ADAU1761_CLK_ENABLE1, + 1, 0, NULL, 0), +}; + +static const struct snd_soc_dapm_route adau1761_dapm_routes[] = { + { "Left Decimator", NULL, "Digital Clock 0", }, + { "Right Decimator", NULL, "Digital Clock 0", }, + { "Left DAC", NULL, "Digital Clock 0", }, + { "Right DAC", NULL, "Digital Clock 0", }, + + { "AIFCLK", NULL, "Digital Clock 1" }, + + { "Playback", NULL, "Serial Port Clock" }, + { "Capture", NULL, "Serial Port Clock" }, + { "Playback", NULL, "Serial Input Routing Clock" }, + { "Capture", NULL, "Serial Output Routing Clock" }, + + { "Left Decimator", NULL, "Decimator Resync Clock" }, + { "Right Decimator", NULL, "Decimator Resync Clock" }, + { "Left DAC", NULL, "Interpolator Resync Clock" }, + { "Right DAC", NULL, "Interpolator Resync Clock" }, + + { "DSP", NULL, "Digital Clock 0" }, + + { "Slew Clock", NULL, "Digital Clock 0" }, + { "Right Playback Mixer", NULL, "Slew Clock" }, + { "Left Playback Mixer", NULL, "Slew Clock" }, + + { "Digital Clock 0", NULL, "SYSCLK" }, + { 
"Digital Clock 1", NULL, "SYSCLK" }, +}; + +static int adau1761_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN); + break; + case SND_SOC_BIAS_OFF: + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN, 0); + break; + + } + codec->dapm.bias_level = level; + return 0; +} + +static enum adau1761_output_mode adau1761_get_lineout_mode( + struct snd_soc_codec *codec) +{ + struct adau1761_platform_data *pdata = codec->dev->platform_data; + + if (pdata) + return pdata->lineout_mode; + + return ADAU1761_OUTPUT_MODE_LINE; +} + +static int adau1761_setup_digmic_jackdetect(struct snd_soc_codec *codec) +{ + struct adau1761_platform_data *pdata = codec->dev->platform_data; + struct adau *adau = snd_soc_codec_get_drvdata(codec); + enum adau1761_digmic_jackdet_pin_mode mode; + unsigned int val = 0; + int ret; + + if (pdata) + mode = pdata->digmic_jackdetect_pin_mode; + else + mode = ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE; + + switch (mode) { + case ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: + switch (pdata->jackdetect_debounce_time) { + case ADAU1761_JACKDETECT_DEBOUNCE_5MS: + case ADAU1761_JACKDETECT_DEBOUNCE_10MS: + case ADAU1761_JACKDETECT_DEBOUNCE_20MS: + case ADAU1761_JACKDETECT_DEBOUNCE_40MS: + val |= pdata->jackdetect_debounce_time << 6; + break; + default: + return -EINVAL; + } + if (pdata->jackdetect_active_low) + val |= ADAU1761_DIGMIC_JACKDETECT_ACTIVE_LOW; + + ret = snd_soc_add_codec_controls(codec, + adau1761_jack_detect_controls, + ARRAY_SIZE(adau1761_jack_detect_controls)); + if (ret) + return ret; + case ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: /* fallthrough */ + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1761_no_dmic_routes, + ARRAY_SIZE(adau1761_no_dmic_routes)); + if (ret) + return ret; + break; + case ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau1761_dmic_widgets, + ARRAY_SIZE(adau1761_dmic_widgets)); + if (ret) + return ret; + + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1761_dmic_routes, + ARRAY_SIZE(adau1761_dmic_routes)); + if (ret) + return ret; + + val |= ADAU1761_DIGMIC_JACKDETECT_DIGMIC; + break; + default: + return -EINVAL; + } + + regmap_write(adau->regmap, ADAU1761_DIGMIC_JACKDETECT, val); + + return 0; +} + +static int adau1761_setup_headphone_mode(struct snd_soc_codec *codec) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + struct adau1761_platform_data *pdata = codec->dev->platform_data; + enum adau1761_output_mode mode; + int ret; + + if (pdata) + mode = pdata->headphone_mode; + else + mode = ADAU1761_OUTPUT_MODE_HEADPHONE; + + switch (mode) { + case ADAU1761_OUTPUT_MODE_LINE: + break; + case ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: + regmap_update_bits(adau->regmap, ADAU1761_PLAY_MONO_OUTPUT_VOL, + ADAU1761_PLAY_MONO_OUTPUT_VOL_MODE_HP | + ADAU1761_PLAY_MONO_OUTPUT_VOL_UNMUTE, + ADAU1761_PLAY_MONO_OUTPUT_VOL_MODE_HP | + ADAU1761_PLAY_MONO_OUTPUT_VOL_UNMUTE); + /* fallthrough */ + case ADAU1761_OUTPUT_MODE_HEADPHONE: + regmap_update_bits(adau->regmap, ADAU1761_PLAY_HP_RIGHT_VOL, + ADAU1761_PLAY_HP_RIGHT_VOL_MODE_HP, + ADAU1761_PLAY_HP_RIGHT_VOL_MODE_HP); + break; + default: + return -EINVAL; + } + + if (mode == 
ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS) { + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau1761_capless_dapm_widgets, + ARRAY_SIZE(adau1761_capless_dapm_widgets)); + if (ret) + return ret; + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1761_capless_dapm_routes, + ARRAY_SIZE(adau1761_capless_dapm_routes)); + } else { + ret = snd_soc_add_codec_controls(codec, adau1761_mono_controls, + ARRAY_SIZE(adau1761_mono_controls)); + if (ret) + return ret; + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau1761_mono_dapm_widgets, + ARRAY_SIZE(adau1761_mono_dapm_widgets)); + if (ret) + return ret; + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1761_mono_dapm_routes, + ARRAY_SIZE(adau1761_mono_dapm_routes)); + } + + return ret; +} + +static bool adau1761_readable_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ADAU1761_DIGMIC_JACKDETECT: + case ADAU1761_REC_MIXER_LEFT0: + case ADAU1761_REC_MIXER_LEFT1: + case ADAU1761_REC_MIXER_RIGHT0: + case ADAU1761_REC_MIXER_RIGHT1: + case ADAU1761_LEFT_DIFF_INPUT_VOL: + case ADAU1761_RIGHT_DIFF_INPUT_VOL: + case ADAU1761_PLAY_LR_MIXER_LEFT: + case ADAU1761_PLAY_MIXER_LEFT0: + case ADAU1761_PLAY_MIXER_LEFT1: + case ADAU1761_PLAY_MIXER_RIGHT0: + case ADAU1761_PLAY_MIXER_RIGHT1: + case ADAU1761_PLAY_LR_MIXER_RIGHT: + case ADAU1761_PLAY_MIXER_MONO: + case ADAU1761_PLAY_HP_LEFT_VOL: + case ADAU1761_PLAY_HP_RIGHT_VOL: + case ADAU1761_PLAY_LINE_LEFT_VOL: + case ADAU1761_PLAY_LINE_RIGHT_VOL: + case ADAU1761_PLAY_MONO_OUTPUT_VOL: + case ADAU1761_POP_CLICK_SUPPRESS: + case ADAU1761_JACK_DETECT_PIN: + case ADAU1761_DEJITTER: + case ADAU1761_CLK_ENABLE0: + case ADAU1761_CLK_ENABLE1: + return true; + default: + break; + } + + return adau17x1_readable_register(dev, reg); +} + +static int adau1761_codec_probe(struct snd_soc_codec *codec) +{ + struct adau1761_platform_data *pdata = codec->dev->platform_data; + struct adau *adau = snd_soc_codec_get_drvdata(codec); + int ret; + + ret = adau17x1_add_widgets(codec); + if (ret < 0) + return ret; + + if (pdata && pdata->input_differential) { + regmap_update_bits(adau->regmap, ADAU1761_LEFT_DIFF_INPUT_VOL, + ADAU1761_DIFF_INPUT_VOL_LDEN, + ADAU1761_DIFF_INPUT_VOL_LDEN); + regmap_update_bits(adau->regmap, ADAU1761_RIGHT_DIFF_INPUT_VOL, + ADAU1761_DIFF_INPUT_VOL_LDEN, + ADAU1761_DIFF_INPUT_VOL_LDEN); + ret = snd_soc_add_codec_controls(codec, + adau1761_differential_mode_controls, + ARRAY_SIZE(adau1761_differential_mode_controls)); + if (ret) + return ret; + } else { + ret = snd_soc_add_codec_controls(codec, + adau1761_single_mode_controls, + ARRAY_SIZE(adau1761_single_mode_controls)); + if (ret) + return ret; + } + + switch (adau1761_get_lineout_mode(codec)) { + case ADAU1761_OUTPUT_MODE_LINE: + break; + case ADAU1761_OUTPUT_MODE_HEADPHONE: + regmap_update_bits(adau->regmap, ADAU1761_PLAY_LINE_LEFT_VOL, + ADAU1761_PLAY_LINE_LEFT_VOL_MODE_HP, + ADAU1761_PLAY_LINE_LEFT_VOL_MODE_HP); + regmap_update_bits(adau->regmap, ADAU1761_PLAY_LINE_RIGHT_VOL, + ADAU1761_PLAY_LINE_RIGHT_VOL_MODE_HP, + ADAU1761_PLAY_LINE_RIGHT_VOL_MODE_HP); + break; + default: + return -EINVAL; + } + + ret = adau1761_setup_headphone_mode(codec); + if (ret) + return ret; + + ret = adau1761_setup_digmic_jackdetect(codec); + if (ret) + return ret; + + if (adau->type == ADAU1761) { + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau1761_dapm_widgets, + ARRAY_SIZE(adau1761_dapm_widgets)); + if (ret) + return ret; + + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1761_dapm_routes, + ARRAY_SIZE(adau1761_dapm_routes)); + if (ret) + 
return ret; + + ret = adau17x1_load_firmware(adau, codec->dev, + ADAU1761_FIRMWARE); + if (ret) + dev_warn(codec->dev, "Failed to load firmware\n"); + } + + ret = adau17x1_add_routes(codec); + if (ret < 0) + return ret; + + return 0; +} + +static const struct snd_soc_codec_driver adau1761_codec_driver = { + .probe = adau1761_codec_probe, + .suspend = adau17x1_suspend, + .resume = adau17x1_resume, + .set_bias_level = adau1761_set_bias_level, + + .controls = adau1761_controls, + .num_controls = ARRAY_SIZE(adau1761_controls), + .dapm_widgets = adau1x61_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(adau1x61_dapm_widgets), + .dapm_routes = adau1x61_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(adau1x61_dapm_routes), +}; + +#define ADAU1761_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_driver adau1361_dai_driver = { + .name = "adau-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 4, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1761_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 4, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1761_FORMATS, + }, + .ops = &adau17x1_dai_ops, +}; + +static struct snd_soc_dai_driver adau1761_dai_driver = { + .name = "adau-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1761_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1761_FORMATS, + }, + .ops = &adau17x1_dai_ops, +}; + +int adau1761_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)) +{ + struct snd_soc_dai_driver *dai_drv; + int ret; + + ret = adau17x1_probe(dev, regmap, type, switch_mode); + if (ret) + return ret; + + if (type == ADAU1361) + dai_drv = &adau1361_dai_driver; + else + dai_drv = &adau1761_dai_driver; + + return snd_soc_register_codec(dev, &adau1761_codec_driver, dai_drv, 1); +} +EXPORT_SYMBOL_GPL(adau1761_probe); + +const struct regmap_config adau1761_regmap_config = { + .val_bits = 8, + .reg_bits = 16, + .max_register = 0x40fa, + .reg_defaults = adau1761_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(adau1761_reg_defaults), + .readable_reg = adau1761_readable_register, + .volatile_reg = adau17x1_volatile_register, + .cache_type = REGCACHE_RBTREE, +}; +EXPORT_SYMBOL_GPL(adau1761_regmap_config); + +MODULE_DESCRIPTION("ASoC ADAU1361/ADAU1461/ADAU1761/ADAU1961 CODEC driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1761.h b/sound/soc/codecs/adau1761.h new file mode 100644 index 000000000000..a9e0d288301e --- /dev/null +++ b/sound/soc/codecs/adau1761.h @@ -0,0 +1,23 @@ +/* + * ADAU1361/ADAU1461/ADAU1761/ADAU1961 driver + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2.
+ */ + +#ifndef __SOUND_SOC_CODECS_ADAU1761_H__ +#define __SOUND_SOC_CODECS_ADAU1761_H__ + +#include +#include "adau17x1.h" + +struct device; + +int adau1761_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)); + +extern const struct regmap_config adau1761_regmap_config; + +#endif -- cgit v1.2.3 From 2923af024681508132881c9e5ddd65cd51b0d8e3 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 27 May 2014 10:53:19 +0200 Subject: ASoC: Add ADAU1381/ADAU1781 audio CODEC support This patch adds support for the Analog Devices ADAU1381 and ADAU1781 audio CODECs. The device is a low-power, 24-bit stereo audio CODEC with multiple analog inputs and outputs, two digital microphone inputs and an I2S interface. The device can be controlled either using I2C or SPI. The main difference between the two variants is that the ADAU1781 has a freely programmable SigmaDSP processor, while the ADAU1381 has a fixed function wind noise reduction filter. Signed-off-by: Lars-Peter Clausen Signed-off-by: Mark Brown --- include/linux/platform_data/adau17x1.h | 19 ++ sound/soc/codecs/Kconfig | 16 ++ sound/soc/codecs/Makefile | 6 + sound/soc/codecs/adau1781-i2c.c | 58 ++++ sound/soc/codecs/adau1781-spi.c | 75 +++++ sound/soc/codecs/adau1781.c | 511 +++++++++++++++++++++++++++++++++ sound/soc/codecs/adau1781.h | 23 ++ 7 files changed, 708 insertions(+) create mode 100644 sound/soc/codecs/adau1781-i2c.c create mode 100644 sound/soc/codecs/adau1781-spi.c create mode 100644 sound/soc/codecs/adau1781.c create mode 100644 sound/soc/codecs/adau1781.h (limited to 'include/linux') diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h index d234d9e46fd6..a81766cae230 100644 --- a/include/linux/platform_data/adau17x1.h +++ b/include/linux/platform_data/adau17x1.h @@ -87,4 +87,23 @@ struct adau1761_platform_data { enum adau17x1_micbias_voltage micbias_voltage; }; +/** + * struct adau1781_platform_data - ADAU1781 Codec driver platform data + * @left_input_differential: If true configure the left input as + * differential input. + * @right_input_differential: If true configure the right input as differential + * input. + * @use_dmic: If true configure the MIC pins as digital microphone pins instead + * of analog microphone pins.
+ * @micbias_voltage: Microphone voltage bias + */ +struct adau1781_platform_data { + bool left_input_differential; + bool right_input_differential; + + bool use_dmic; + + enum adau17x1_micbias_voltage micbias_voltage; +}; + #endif diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 4233ed118c48..dd5deea9221a 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -25,6 +25,8 @@ config SND_SOC_ALL_CODECS select SND_SOC_ADAU1373 if I2C select SND_SOC_ADAU1761_I2C if I2C select SND_SOC_ADAU1761_SPI if SPI + select SND_SOC_ADAU1781_I2C if I2C + select SND_SOC_ADAU1781_SPI if SPI select SND_SOC_ADAV801 if SPI_MASTER select SND_SOC_ADAV803 if I2C select SND_SOC_ADAU1977_SPI if SPI_MASTER @@ -238,6 +240,20 @@ config SND_SOC_ADAU1761_SPI select SND_SOC_ADAU1761 select REGMAP_SPI +config SND_SOC_ADAU1781 + select SND_SOC_ADAU17X1 + tristate + +config SND_SOC_ADAU1781_I2C + tristate + select SND_SOC_ADAU1781 + select REGMAP_I2C + +config SND_SOC_ADAU1781_SPI + tristate + select SND_SOC_ADAU1781 + select REGMAP_SPI + config SND_SOC_ADAU1977 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index e96499ca76bd..4ef1a1ed5f92 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -13,6 +13,9 @@ snd-soc-adau17x1-objs := adau17x1.o snd-soc-adau1761-objs := adau1761.o snd-soc-adau1761-i2c-objs := adau1761-i2c.o snd-soc-adau1761-spi-objs := adau1761-spi.o +snd-soc-adau1781-objs := adau1781.o +snd-soc-adau1781-i2c-objs := adau1781-i2c.o +snd-soc-adau1781-spi-objs := adau1781-spi.o snd-soc-adau1977-objs := adau1977.o snd-soc-adau1977-spi-objs := adau1977-spi.o snd-soc-adau1977-i2c-objs := adau1977-i2c.o @@ -166,6 +169,9 @@ obj-$(CONFIG_SND_SOC_ADAU17X1) += snd-soc-adau17x1.o obj-$(CONFIG_SND_SOC_ADAU1761) += snd-soc-adau1761.o obj-$(CONFIG_SND_SOC_ADAU1761_I2C) += snd-soc-adau1761-i2c.o obj-$(CONFIG_SND_SOC_ADAU1761_SPI) += snd-soc-adau1761-spi.o +obj-$(CONFIG_SND_SOC_ADAU1781) += snd-soc-adau1781.o +obj-$(CONFIG_SND_SOC_ADAU1781_I2C) += snd-soc-adau1781-i2c.o +obj-$(CONFIG_SND_SOC_ADAU1781_SPI) += snd-soc-adau1781-spi.o obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c new file mode 100644 index 000000000000..2ce4362ccec1 --- /dev/null +++ b/sound/soc/codecs/adau1781-i2c.c @@ -0,0 +1,58 @@ +/* + * Driver for ADAU1381/ADAU1781 CODEC + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. 
+ */ + +#include +#include +#include +#include +#include + +#include "adau1781.h" + +static int adau1781_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct regmap_config config; + + config = adau1781_regmap_config; + config.val_bits = 8; + config.reg_bits = 16; + + return adau1781_probe(&client->dev, + devm_regmap_init_i2c(client, &config), + id->driver_data, NULL); +} + +static int adau1781_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + return 0; +} + +static const struct i2c_device_id adau1781_i2c_ids[] = { + { "adau1381", ADAU1381 }, + { "adau1781", ADAU1781 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adau1781_i2c_ids); + +static struct i2c_driver adau1781_i2c_driver = { + .driver = { + .name = "adau1781", + .owner = THIS_MODULE, + }, + .probe = adau1781_i2c_probe, + .remove = adau1781_i2c_remove, + .id_table = adau1781_i2c_ids, +}; +module_i2c_driver(adau1781_i2c_driver); + +MODULE_DESCRIPTION("ASoC ADAU1381/ADAU1781 CODEC I2C driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1781-spi.c b/sound/soc/codecs/adau1781-spi.c new file mode 100644 index 000000000000..194686716bbe --- /dev/null +++ b/sound/soc/codecs/adau1781-spi.c @@ -0,0 +1,75 @@ +/* + * Driver for ADAU1381/ADAU1781 CODEC + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#include +#include +#include +#include +#include + +#include "adau1781.h" + +static void adau1781_spi_switch_mode(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + /* + * To get the device into SPI mode CLATCH has to be pulled low three + * times. Do this by issuing three dummy reads. + */ + spi_w8r8(spi, 0x00); + spi_w8r8(spi, 0x00); + spi_w8r8(spi, 0x00); +} + +static int adau1781_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct regmap_config config; + + if (!id) + return -EINVAL; + + config = adau1781_regmap_config; + config.val_bits = 8; + config.reg_bits = 24; + config.read_flag_mask = 0x1; + + return adau1781_probe(&spi->dev, + devm_regmap_init_spi(spi, &config), + id->driver_data, adau1781_spi_switch_mode); +} + +static int adau1781_spi_remove(struct spi_device *spi) +{ + snd_soc_unregister_codec(&spi->dev); + return 0; +} + +static const struct spi_device_id adau1781_spi_id[] = { + { "adau1381", ADAU1381 }, + { "adau1781", ADAU1781 }, + { } +}; +MODULE_DEVICE_TABLE(spi, adau1781_spi_id); + +static struct spi_driver adau1781_spi_driver = { + .driver = { + .name = "adau1781", + .owner = THIS_MODULE, + }, + .probe = adau1781_spi_probe, + .remove = adau1781_spi_remove, + .id_table = adau1781_spi_id, +}; +module_spi_driver(adau1781_spi_driver); + +MODULE_DESCRIPTION("ASoC ADAU1381/ADAU1781 CODEC SPI driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c new file mode 100644 index 000000000000..045a61413840 --- /dev/null +++ b/sound/soc/codecs/adau1781.c @@ -0,0 +1,511 @@ +/* + * Driver for ADAU1381/ADAU1781 codec + * + * Copyright 2011-2013 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adau17x1.h" +#include "adau1781.h" + +#define ADAU1781_DMIC_BEEP_CTRL 0x4008 +#define ADAU1781_LEFT_PGA 0x400e +#define ADAU1781_RIGHT_PGA 0x400f +#define ADAU1781_LEFT_PLAYBACK_MIXER 0x401c +#define ADAU1781_RIGHT_PLAYBACK_MIXER 0x401e +#define ADAU1781_MONO_PLAYBACK_MIXER 0x401f +#define ADAU1781_LEFT_LINEOUT 0x4025 +#define ADAU1781_RIGHT_LINEOUT 0x4026 +#define ADAU1781_SPEAKER 0x4027 +#define ADAU1781_BEEP_ZC 0x4028 +#define ADAU1781_DEJITTER 0x4032 +#define ADAU1781_DIG_PWDN0 0x4080 +#define ADAU1781_DIG_PWDN1 0x4081 + +#define ADAU1781_INPUT_DIFFERNTIAL BIT(3) + +#define ADAU1381_FIRMWARE "adau1381.bin" +#define ADAU1781_FIRMWARE "adau1781.bin" + +static const struct reg_default adau1781_reg_defaults[] = { + { ADAU1781_DMIC_BEEP_CTRL, 0x00 }, + { ADAU1781_LEFT_PGA, 0xc7 }, + { ADAU1781_RIGHT_PGA, 0xc7 }, + { ADAU1781_LEFT_PLAYBACK_MIXER, 0x00 }, + { ADAU1781_RIGHT_PLAYBACK_MIXER, 0x00 }, + { ADAU1781_MONO_PLAYBACK_MIXER, 0x00 }, + { ADAU1781_LEFT_LINEOUT, 0x00 }, + { ADAU1781_RIGHT_LINEOUT, 0x00 }, + { ADAU1781_SPEAKER, 0x00 }, + { ADAU1781_BEEP_ZC, 0x19 }, + { ADAU1781_DEJITTER, 0x60 }, + { ADAU1781_DIG_PWDN1, 0x0c }, + { ADAU1781_DIG_PWDN1, 0x00 }, + { ADAU17X1_CLOCK_CONTROL, 0x00 }, + { ADAU17X1_PLL_CONTROL, 0x00 }, + { ADAU17X1_REC_POWER_MGMT, 0x00 }, + { ADAU17X1_MICBIAS, 0x04 }, + { ADAU17X1_SERIAL_PORT0, 0x00 }, + { ADAU17X1_SERIAL_PORT1, 0x00 }, + { ADAU17X1_CONVERTER0, 0x00 }, + { ADAU17X1_CONVERTER1, 0x00 }, + { ADAU17X1_LEFT_INPUT_DIGITAL_VOL, 0x00 }, + { ADAU17X1_RIGHT_INPUT_DIGITAL_VOL, 0x00 }, + { ADAU17X1_ADC_CONTROL, 0x00 }, + { ADAU17X1_PLAY_POWER_MGMT, 0x00 }, + { ADAU17X1_DAC_CONTROL0, 0x00 }, + { ADAU17X1_DAC_CONTROL1, 0x00 }, + { ADAU17X1_DAC_CONTROL2, 0x00 }, + { ADAU17X1_SERIAL_PORT_PAD, 0x00 }, + { ADAU17X1_CONTROL_PORT_PAD0, 0x00 }, + { ADAU17X1_CONTROL_PORT_PAD1, 0x00 }, + { ADAU17X1_DSP_SAMPLING_RATE, 0x01 }, + { ADAU17X1_SERIAL_INPUT_ROUTE, 0x00 }, + { ADAU17X1_SERIAL_OUTPUT_ROUTE, 0x00 }, + { ADAU17X1_DSP_ENABLE, 0x00 }, + { ADAU17X1_DSP_RUN, 0x00 }, + { ADAU17X1_SERIAL_SAMPLING_RATE, 0x00 }, +}; + +static const DECLARE_TLV_DB_SCALE(adau1781_speaker_tlv, 0, 200, 0); + +static const DECLARE_TLV_DB_RANGE(adau1781_pga_tlv, + 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), + 2, 3, TLV_DB_SCALE_ITEM(1000, 400, 0), + 4, 4, TLV_DB_SCALE_ITEM(1700, 0, 0), + 5, 7, TLV_DB_SCALE_ITEM(2000, 600, 0) +); + +static const DECLARE_TLV_DB_RANGE(adau1781_beep_tlv, + 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), + 2, 3, TLV_DB_SCALE_ITEM(1000, 400, 0), + 4, 4, TLV_DB_SCALE_ITEM(-2300, 0, 0), + 5, 7, TLV_DB_SCALE_ITEM(2000, 600, 0) +); + +static const DECLARE_TLV_DB_SCALE(adau1781_sidetone_tlv, -1800, 300, 1); + +static const char * const adau1781_speaker_bias_select_text[] = { + "Normal operation", "Power saving", "Enhanced performance", +}; + +static const char * const adau1781_bias_select_text[] = { + "Normal operation", "Extreme power saving", "Power saving", + "Enhanced performance", +}; + +static SOC_ENUM_SINGLE_DECL(adau1781_adc_bias_enum, + ADAU17X1_REC_POWER_MGMT, 3, adau1781_bias_select_text); +static SOC_ENUM_SINGLE_DECL(adau1781_speaker_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 6, adau1781_speaker_bias_select_text); +static SOC_ENUM_SINGLE_DECL(adau1781_dac_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 4, adau1781_bias_select_text); +static SOC_ENUM_SINGLE_DECL(adau1781_playback_bias_enum, + ADAU17X1_PLAY_POWER_MGMT, 2, adau1781_bias_select_text); +static 
SOC_ENUM_SINGLE_DECL(adau1781_capture_bias_enum, + ADAU17X1_REC_POWER_MGMT, 1, adau1781_bias_select_text); + +static const struct snd_kcontrol_new adau1781_controls[] = { + SOC_SINGLE_TLV("Beep Capture Volume", ADAU1781_DMIC_BEEP_CTRL, 0, 7, 0, + adau1781_beep_tlv), + SOC_DOUBLE_R_TLV("PGA Capture Volume", ADAU1781_LEFT_PGA, + ADAU1781_RIGHT_PGA, 5, 7, 0, adau1781_pga_tlv), + SOC_DOUBLE_R("PGA Capture Switch", ADAU1781_LEFT_PGA, + ADAU1781_RIGHT_PGA, 1, 1, 0), + + SOC_DOUBLE_R("Lineout Playback Switch", ADAU1781_LEFT_LINEOUT, + ADAU1781_RIGHT_LINEOUT, 1, 1, 0), + SOC_SINGLE("Beep ZC Switch", ADAU1781_BEEP_ZC, 0, 1, 0), + + SOC_SINGLE("Mono Playback Switch", ADAU1781_MONO_PLAYBACK_MIXER, + 0, 1, 0), + SOC_SINGLE_TLV("Mono Playback Volume", ADAU1781_SPEAKER, 6, 3, 0, + adau1781_speaker_tlv), + + SOC_ENUM("ADC Bias", adau1781_adc_bias_enum), + SOC_ENUM("DAC Bias", adau1781_dac_bias_enum), + SOC_ENUM("Capture Bias", adau1781_capture_bias_enum), + SOC_ENUM("Playback Bias", adau1781_playback_bias_enum), + SOC_ENUM("Speaker Bias", adau1781_speaker_bias_enum), +}; + +static const struct snd_kcontrol_new adau1781_beep_mixer_controls[] = { + SOC_DAPM_SINGLE("Beep Capture Switch", ADAU1781_DMIC_BEEP_CTRL, + 3, 1, 0), +}; + +static const struct snd_kcontrol_new adau1781_left_mixer_controls[] = { + SOC_DAPM_SINGLE_AUTODISABLE("Switch", + ADAU1781_LEFT_PLAYBACK_MIXER, 5, 1, 0), + SOC_DAPM_SINGLE_TLV("Beep Playback Volume", + ADAU1781_LEFT_PLAYBACK_MIXER, 1, 8, 0, adau1781_sidetone_tlv), +}; + +static const struct snd_kcontrol_new adau1781_right_mixer_controls[] = { + SOC_DAPM_SINGLE_AUTODISABLE("Switch", + ADAU1781_RIGHT_PLAYBACK_MIXER, 6, 1, 0), + SOC_DAPM_SINGLE_TLV("Beep Playback Volume", + ADAU1781_LEFT_PLAYBACK_MIXER, 1, 8, 0, adau1781_sidetone_tlv), +}; + +static const struct snd_kcontrol_new adau1781_mono_mixer_controls[] = { + SOC_DAPM_SINGLE_AUTODISABLE("Left Switch", + ADAU1781_MONO_PLAYBACK_MIXER, 7, 1, 0), + SOC_DAPM_SINGLE_AUTODISABLE("Right Switch", + ADAU1781_MONO_PLAYBACK_MIXER, 6, 1, 0), + SOC_DAPM_SINGLE_TLV("Beep Playback Volume", + ADAU1781_MONO_PLAYBACK_MIXER, 2, 8, 0, adau1781_sidetone_tlv), +}; + +static int adau1781_dejitter_fixup(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + /* After any power changes have been made the dejitter circuit + * has to be reinitialized. 
*/ + regmap_write(adau->regmap, ADAU1781_DEJITTER, 0); + if (!adau->master) + regmap_write(adau->regmap, ADAU1781_DEJITTER, 5); + + return 0; +} + +static const struct snd_soc_dapm_widget adau1781_dapm_widgets[] = { + SND_SOC_DAPM_PGA("Left PGA", ADAU1781_LEFT_PGA, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA("Right PGA", ADAU1781_RIGHT_PGA, 0, 0, NULL, 0), + + SND_SOC_DAPM_OUT_DRV("Speaker", ADAU1781_SPEAKER, 0, 0, NULL, 0), + + SOC_MIXER_NAMED_CTL_ARRAY("Beep Mixer", ADAU17X1_MICBIAS, 4, 0, + adau1781_beep_mixer_controls), + + SOC_MIXER_ARRAY("Left Lineout Mixer", SND_SOC_NOPM, 0, 0, + adau1781_left_mixer_controls), + SOC_MIXER_ARRAY("Right Lineout Mixer", SND_SOC_NOPM, 0, 0, + adau1781_right_mixer_controls), + SOC_MIXER_ARRAY("Mono Mixer", SND_SOC_NOPM, 0, 0, + adau1781_mono_mixer_controls), + + SND_SOC_DAPM_SUPPLY("Serial Input Routing", ADAU1781_DIG_PWDN0, + 2, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Serial Output Routing", ADAU1781_DIG_PWDN0, + 3, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Clock Domain Transfer", ADAU1781_DIG_PWDN0, + 5, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Serial Ports", ADAU1781_DIG_PWDN0, 4, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC Engine", ADAU1781_DIG_PWDN0, 7, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("DAC Engine", ADAU1781_DIG_PWDN1, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("Digital Mic", ADAU1781_DIG_PWDN1, 1, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("Sound Engine", ADAU1781_DIG_PWDN0, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY_S("SYSCLK", 1, ADAU1781_DIG_PWDN0, 1, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("Zero Crossing Detector", ADAU1781_DIG_PWDN1, 2, 0, + NULL, 0), + + SND_SOC_DAPM_POST("Dejitter fixup", adau1781_dejitter_fixup), + + SND_SOC_DAPM_INPUT("BEEP"), + + SND_SOC_DAPM_OUTPUT("AOUTL"), + SND_SOC_DAPM_OUTPUT("AOUTR"), + SND_SOC_DAPM_OUTPUT("SP"), + SND_SOC_DAPM_INPUT("LMIC"), + SND_SOC_DAPM_INPUT("RMIC"), +}; + +static const struct snd_soc_dapm_route adau1781_dapm_routes[] = { + { "Left Lineout Mixer", NULL, "Left Playback Enable" }, + { "Right Lineout Mixer", NULL, "Right Playback Enable" }, + + { "Left Lineout Mixer", "Beep Playback Volume", "Beep Mixer" }, + { "Left Lineout Mixer", "Switch", "Left DAC" }, + + { "Right Lineout Mixer", "Beep Playback Volume", "Beep Mixer" }, + { "Right Lineout Mixer", "Switch", "Right DAC" }, + + { "Mono Mixer", "Beep Playback Volume", "Beep Mixer" }, + { "Mono Mixer", "Right Switch", "Right DAC" }, + { "Mono Mixer", "Left Switch", "Left DAC" }, + { "Speaker", NULL, "Mono Mixer" }, + + { "Mono Mixer", NULL, "SYSCLK" }, + { "Left Lineout Mixer", NULL, "SYSCLK" }, + { "Left Lineout Mixer", NULL, "SYSCLK" }, + + { "Beep Mixer", "Beep Capture Switch", "BEEP" }, + { "Beep Mixer", NULL, "Zero Crossing Detector" }, + + { "Left DAC", NULL, "DAC Engine" }, + { "Right DAC", NULL, "DAC Engine" }, + + { "Sound Engine", NULL, "SYSCLK" }, + { "DSP", NULL, "Sound Engine" }, + + { "Left Decimator", NULL, "ADC Engine" }, + { "Right Decimator", NULL, "ADC Engine" }, + + { "AIFCLK", NULL, "SYSCLK" }, + + { "Playback", NULL, "Serial Input Routing" }, + { "Playback", NULL, "Serial Ports" }, + { "Playback", NULL, "Clock Domain Transfer" }, + { "Capture", NULL, "Serial Output Routing" }, + { "Capture", NULL, "Serial Ports" }, + { "Capture", NULL, "Clock Domain Transfer" }, + + { "AOUTL", NULL, "Left Lineout Mixer" }, + { "AOUTR", NULL, "Right Lineout Mixer" }, + { "SP", NULL, "Speaker" }, +}; + +static const struct snd_soc_dapm_route adau1781_adc_dapm_routes[] = { + { "Left PGA", NULL, "LMIC" }, + { "Right PGA", NULL, "RMIC" }, + + { "Left Decimator", NULL, "Left PGA" }, + { "Right 
Decimator", NULL, "Right PGA" }, +}; + +static const char * const adau1781_dmic_select_text[] = { + "DMIC1", "DMIC2", +}; + +static SOC_ENUM_SINGLE_VIRT_DECL(adau1781_dmic_select_enum, + adau1781_dmic_select_text); + +static const struct snd_kcontrol_new adau1781_dmic_mux = + SOC_DAPM_ENUM("DMIC Select", adau1781_dmic_select_enum); + +static const struct snd_soc_dapm_widget adau1781_dmic_dapm_widgets[] = { + SND_SOC_DAPM_MUX("DMIC Select", SND_SOC_NOPM, 0, 0, &adau1781_dmic_mux), + + SND_SOC_DAPM_ADC("DMIC1", NULL, ADAU1781_DMIC_BEEP_CTRL, 4, 0), + SND_SOC_DAPM_ADC("DMIC2", NULL, ADAU1781_DMIC_BEEP_CTRL, 5, 0), +}; + +static const struct snd_soc_dapm_route adau1781_dmic_dapm_routes[] = { + { "DMIC1", NULL, "LMIC" }, + { "DMIC2", NULL, "RMIC" }, + + { "DMIC1", NULL, "Digital Mic" }, + { "DMIC2", NULL, "Digital Mic" }, + + { "DMIC Select", "DMIC1", "DMIC1" }, + { "DMIC Select", "DMIC2", "DMIC2" }, + + { "Left Decimator", NULL, "DMIC Select" }, + { "Right Decimator", NULL, "DMIC Select" }, +}; + +static int adau1781_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN); + + /* Precharge */ + regmap_update_bits(adau->regmap, ADAU1781_DIG_PWDN1, 0x8, 0x8); + break; + case SND_SOC_BIAS_OFF: + regmap_update_bits(adau->regmap, ADAU1781_DIG_PWDN1, 0xc, 0x0); + regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL, + ADAU17X1_CLOCK_CONTROL_SYSCLK_EN, 0); + break; + } + + codec->dapm.bias_level = level; + return 0; +} + +static bool adau1781_readable_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ADAU1781_DMIC_BEEP_CTRL: + case ADAU1781_LEFT_PGA: + case ADAU1781_RIGHT_PGA: + case ADAU1781_LEFT_PLAYBACK_MIXER: + case ADAU1781_RIGHT_PLAYBACK_MIXER: + case ADAU1781_MONO_PLAYBACK_MIXER: + case ADAU1781_LEFT_LINEOUT: + case ADAU1781_RIGHT_LINEOUT: + case ADAU1781_SPEAKER: + case ADAU1781_BEEP_ZC: + case ADAU1781_DEJITTER: + case ADAU1781_DIG_PWDN0: + case ADAU1781_DIG_PWDN1: + return true; + default: + break; + } + + return adau17x1_readable_register(dev, reg); +} + +static int adau1781_set_input_mode(struct adau *adau, unsigned int reg, + bool differential) +{ + unsigned int val; + + if (differential) + val = ADAU1781_INPUT_DIFFERNTIAL; + else + val = 0; + + return regmap_update_bits(adau->regmap, reg, + ADAU1781_INPUT_DIFFERNTIAL, val); +} + +static int adau1781_codec_probe(struct snd_soc_codec *codec) +{ + struct adau1781_platform_data *pdata = dev_get_platdata(codec->dev); + struct adau *adau = snd_soc_codec_get_drvdata(codec); + const char *firmware; + int ret; + + ret = adau17x1_add_widgets(codec); + if (ret) + return ret; + + if (pdata) { + ret = adau1781_set_input_mode(adau, ADAU1781_LEFT_PGA, + pdata->left_input_differential); + if (ret) + return ret; + ret = adau1781_set_input_mode(adau, ADAU1781_RIGHT_PGA, + pdata->right_input_differential); + if (ret) + return ret; + } + + if (pdata && pdata->use_dmic) { + ret = snd_soc_dapm_new_controls(&codec->dapm, + adau1781_dmic_dapm_widgets, + ARRAY_SIZE(adau1781_dmic_dapm_widgets)); + if (ret) + return ret; + ret = snd_soc_dapm_add_routes(&codec->dapm, + adau1781_dmic_dapm_routes, + ARRAY_SIZE(adau1781_dmic_dapm_routes)); + if (ret) + return ret; + } else { + ret = 
snd_soc_dapm_add_routes(&codec->dapm, + adau1781_adc_dapm_routes, + ARRAY_SIZE(adau1781_adc_dapm_routes)); + if (ret) + return ret; + } + + switch (adau->type) { + case ADAU1381: + firmware = ADAU1381_FIRMWARE; + break; + case ADAU1781: + firmware = ADAU1781_FIRMWARE; + break; + default: + return -EINVAL; + } + + ret = adau17x1_add_routes(codec); + if (ret < 0) + return ret; + + ret = adau17x1_load_firmware(adau, codec->dev, firmware); + if (ret) + dev_warn(codec->dev, "Failed to load firmware\n"); + + return 0; +} + +static const struct snd_soc_codec_driver adau1781_codec_driver = { + .probe = adau1781_codec_probe, + .suspend = adau17x1_suspend, + .resume = adau17x1_resume, + .set_bias_level = adau1781_set_bias_level, + + .controls = adau1781_controls, + .num_controls = ARRAY_SIZE(adau1781_controls), + .dapm_widgets = adau1781_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(adau1781_dapm_widgets), + .dapm_routes = adau1781_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(adau1781_dapm_routes), +}; + +#define ADAU1781_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_driver adau1781_dai_driver = { + .name = "adau-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1781_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_96000, + .formats = ADAU1781_FORMATS, + }, + .ops = &adau17x1_dai_ops, +}; + +const struct regmap_config adau1781_regmap_config = { + .val_bits = 8, + .reg_bits = 16, + .max_register = 0x40f8, + .reg_defaults = adau1781_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(adau1781_reg_defaults), + .readable_reg = adau1781_readable_register, + .volatile_reg = adau17x1_volatile_register, + .cache_type = REGCACHE_RBTREE, +}; +EXPORT_SYMBOL_GPL(adau1781_regmap_config); + +int adau1781_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)) +{ + int ret; + + ret = adau17x1_probe(dev, regmap, type, switch_mode); + if (ret) + return ret; + + return snd_soc_register_codec(dev, &adau1781_codec_driver, + &adau1781_dai_driver, 1); +} +EXPORT_SYMBOL_GPL(adau1781_probe); + +MODULE_DESCRIPTION("ASoC ADAU1381/ADAU1781 driver"); +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/adau1781.h b/sound/soc/codecs/adau1781.h new file mode 100644 index 000000000000..2b96e0a9ff2e --- /dev/null +++ b/sound/soc/codecs/adau1781.h @@ -0,0 +1,23 @@ +/* + * ADAU1381/ADAU1781 driver + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __SOUND_SOC_CODECS_ADAU1781_H__ +#define __SOUND_SOC_CODECS_ADAU1781_H__ + +#include +#include "adau17x1.h" + +struct device; + +int adau1781_probe(struct device *dev, struct regmap *regmap, + enum adau17x1_type type, void (*switch_mode)(struct device *dev)); + +extern const struct regmap_config adau1781_regmap_config; + +#endif -- cgit v1.2.3 From 2c1f1ff0f0d9e0df8c9b6d3697ac250900091541 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Apr 2014 12:56:34 +0200 Subject: driver core: dev_set_drvdata returns void dev_set_drvdata can no longer fail, so it could return void. All callers have hopefully been updated to no longer check for the return value. 
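For reference, a minimal caller-side sketch of the calling convention after this change (hypothetical driver; foo_probe and struct foo_priv are illustrative names, not part of this patch):

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_priv *priv;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                /* dev_set_drvdata() can no longer fail, so there is nothing to check. */
                dev_set_drvdata(&pdev->dev, priv);

                return 0;
        }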
Signed-off-by: Jean Delvare Signed-off-by: Greg Kroah-Hartman --- drivers/base/dd.c | 3 +-- include/linux/device.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d14b6e895896..d21f4b8dc37b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -600,9 +600,8 @@ void *dev_get_drvdata(const struct device *dev) } EXPORT_SYMBOL(dev_get_drvdata); -int dev_set_drvdata(struct device *dev, void *data) +void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; - return 0; } EXPORT_SYMBOL(dev_set_drvdata); diff --git a/include/linux/device.h b/include/linux/device.h index 5c94ac3e7972..6d3a75773cd4 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -911,7 +911,7 @@ extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); extern void *dev_get_drvdata(const struct device *dev); -extern int dev_set_drvdata(struct device *dev, void *data); +extern void dev_set_drvdata(struct device *dev, void *data); static inline bool device_supports_offline(struct device *dev) { -- cgit v1.2.3 From a996d010b648788b615938f6a26be6cf08d96aaf Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Apr 2014 12:58:53 +0200 Subject: driver core: Inline dev_set/get_drvdata dev_set_drvdata and dev_get_drvdata are now simple enough again that we can inline them as they used to be before commit b40284378. Signed-off-by: Jean Delvare Signed-off-by: Greg Kroah-Hartman --- drivers/base/dd.c | 16 ---------------- include/linux/device.h | 12 ++++++++++-- 2 files changed, 10 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/dd.c b/drivers/base/dd.c index ba03353ff243..e4ffbcf2f519 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -587,19 +587,3 @@ void driver_detach(struct device_driver *drv) put_device(dev); } } - -/* - * These exports can't be _GPL due to .h files using this within them, and it - * might break something that was previously working... - */ -void *dev_get_drvdata(const struct device *dev) -{ - return dev->driver_data; -} -EXPORT_SYMBOL(dev_get_drvdata); - -void dev_set_drvdata(struct device *dev, void *data) -{ - dev->driver_data = data; -} -EXPORT_SYMBOL(dev_set_drvdata); diff --git a/include/linux/device.h b/include/linux/device.h index 6d3a75773cd4..1b18c886445c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -826,6 +826,16 @@ static inline void set_dev_node(struct device *dev, int node) } #endif +static inline void *dev_get_drvdata(const struct device *dev) +{ + return dev->driver_data; +} + +static inline void dev_set_drvdata(struct device *dev, void *data) +{ + dev->driver_data = data; +} + static inline struct pm_subsys_data *dev_to_psd(struct device *dev) { return dev ? 
dev->power.subsys_data : NULL; @@ -910,8 +920,6 @@ extern int device_move(struct device *dev, struct device *new_parent, extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); -extern void *dev_get_drvdata(const struct device *dev); -extern void dev_set_drvdata(struct device *dev, void *data); static inline bool device_supports_offline(struct device *dev) { -- cgit v1.2.3 From 34470e0bfae223e3f22bd2bd6e0e1dac366c9290 Mon Sep 17 00:00:00 2001 From: David Fries Date: Tue, 8 Apr 2014 22:37:08 -0500 Subject: connector: allow multiple messages to be sent in one packet This increases the amount of bundling to reduce the number of packets sent. For the one wire use there can be multiple struct w1_netlink_cmd in a struct w1_netlink_msg and multiple of those in struct cn_msg, and with this change multiple of those in a struct nlmsghdr, and at each level the len identifies there being multiple of the next. Signed-off-by: David Fries Acked-by: Evgeniy Polyakov Signed-off-by: Greg Kroah-Hartman --- Documentation/connector/connector.txt | 13 ++++++++++--- drivers/connector/connector.c | 17 +++++++++++++++-- include/linux/connector.h | 1 + 3 files changed, 26 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt index e5c5f5e6ab70..e56abdb21975 100644 --- a/Documentation/connector/connector.txt +++ b/Documentation/connector/connector.txt @@ -24,7 +24,8 @@ netlink based networking for inter-process communication in a significantly easier way: int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *)); -void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask); +void cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __group, int gfp_mask); +void cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, int gfp_mask); struct cb_id { @@ -71,15 +72,21 @@ void cn_del_callback(struct cb_id *id); struct cb_id *id - unique connector's user identifier. -int cn_netlink_send(struct cn_msg *msg, u32 __groups, int gfp_mask); +int cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __groups, int gfp_mask); +int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __groups, int gfp_mask); Sends message to the specified groups. It can be safely called from softirq context, but may silently fail under strong memory pressure. If there are no listeners for given group -ESRCH can be returned. struct cn_msg * - message header(with attached data). + u16 len - for *_multi multiple cn_msg messages can be sent + u32 port - destination port. + If non-zero the message will be sent to the + given port, which should be set to the + original sender. u32 __group - destination group. - If __group is zero, then appropriate group will + If port and __group is zero, then appropriate group will be searched through all registered connector users, and message will be delivered to the group which was created for user with the same ID as in msg. diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index b14f1d36f897..f612d68629dc 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -43,6 +43,8 @@ static struct cn_dev cdev; static int cn_already_initialized; /* + * Sends mult (multiple) cn_msg at a time. + * * msg->seq and msg->ack are used to determine message genealogy. 
* When someone sends message it puts there locally unique sequence * and random acknowledge numbers. Sequence number may be copied into @@ -62,10 +64,13 @@ static int cn_already_initialized; * the acknowledgement number in the original message + 1, then it is * a new message. * + * If msg->len != len, then additional cn_msg messages are expected following + * the first msg. + * * The message is sent to, the portid if given, the group if given, both if * both, or if both are zero then the group is looked up and sent there. */ -int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, +int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, gfp_t gfp_mask) { struct cn_callback_entry *__cbq; @@ -98,7 +103,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, if (!portid && !netlink_has_listeners(dev->nls, group)) return -ESRCH; - size = sizeof(*msg) + msg->len; + size = sizeof(*msg) + len; skb = nlmsg_new(size, gfp_mask); if (!skb) @@ -121,6 +126,14 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, gfp_mask); return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); } +EXPORT_SYMBOL_GPL(cn_netlink_send_mult); + +/* same as cn_netlink_send_mult except msg->len is used for len */ +int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, + gfp_t gfp_mask) +{ + return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); +} EXPORT_SYMBOL_GPL(cn_netlink_send); /* diff --git a/include/linux/connector.h b/include/linux/connector.h index be9c4747d511..f8fe8637d771 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -71,6 +71,7 @@ struct cn_dev { int cn_add_callback(struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); void cn_del_callback(struct cb_id *); +int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, -- cgit v1.2.3 From 451ef1caa8698511bb7766344ccec9f08d5d294b Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 13 May 2014 09:05:26 -0700 Subject: init.h: Update initcall_sync variants to fix build errors We are getting randconfig build errors on device drivers with tristate Kconfig option if they are using custom initcall levels. Rather than add ifdeffery into the drivers, let's add the missing initcall_sync variants. As the comment in init.h has kept people from updating the list of initcalls that can be just module_init when the driver is loaded as a loadable module, let's also update the comment a bit to describe valid use cases custom initcall levels. While most drivers should nowadays use just regular module_init because of the deferred probe, we do have quite a few custom initcall levels left that we cannot remove until tested properly. There are also still few valid cases where a custom initcall level might make sense that I'm aware of. For example a bus snooping driver can provide information about invalid bus access and is handy loader early when built in. But there's no hard dependency to have it necessarily built in and a loadable module is a valid option. Another example is a driver implementing a Linux framework like pinctrl framework. That driver may be needed early on some platforms because of legacy reasons, while it can be just a regular module_init on most platforms. 
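As an illustration of the added fallbacks, a hypothetical bus-snooping debug driver using a custom initcall level (foo_snoop_driver and the function names are made up; this is a sketch, not code from this patch):

        static int __init foo_snoop_init(void)
        {
                /* Wants to run early when built in, to catch invalid bus accesses. */
                return platform_driver_register(&foo_snoop_driver);
        }
        /* With this patch, resolves to module_init(foo_snoop_init) when built as a module. */
        subsys_initcall_sync(foo_snoop_init);

        static void __exit foo_snoop_exit(void)
        {
                platform_driver_unregister(&foo_snoop_driver);
        }
        module_exit(foo_snoop_exit);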
Signed-off-by: Tony Lindgren Cc: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- include/linux/init.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index a3ba27076342..2df8e8dd10a4 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -297,16 +297,28 @@ void __init parse_early_options(char *cmdline); #else /* MODULE */ -/* Don't use these in loadable modules, but some people do... */ +/* + * In most cases loadable modules do not need custom + * initcall levels. There are still some valid cases where + * a driver may be needed early if built in, and does not + * matter when built as a loadable module. Like bus + * snooping debug drivers. + */ #define early_initcall(fn) module_init(fn) #define core_initcall(fn) module_init(fn) +#define core_initcall_sync(fn) module_init(fn) #define postcore_initcall(fn) module_init(fn) +#define postcore_initcall_sync(fn) module_init(fn) #define arch_initcall(fn) module_init(fn) #define subsys_initcall(fn) module_init(fn) +#define subsys_initcall_sync(fn) module_init(fn) #define fs_initcall(fn) module_init(fn) +#define fs_initcall_sync(fn) module_init(fn) #define rootfs_initcall(fn) module_init(fn) #define device_initcall(fn) module_init(fn) +#define device_initcall_sync(fn) module_init(fn) #define late_initcall(fn) module_init(fn) +#define late_initcall_sync(fn) module_init(fn) #define console_initcall(fn) module_init(fn) #define security_initcall(fn) module_init(fn) -- cgit v1.2.3 From 78e1da627040ca49c41b456db707342ef210ae0f Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 16 Apr 2014 11:56:45 +0200 Subject: sysfs.h: don't return a void-valued expression in sysfs_remove_file Sparse was complaining about that: include/linux/sysfs.h:432:9: warning: returning void-valued expression Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- include/linux/sysfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 5ffaa3443712..f97d0dbb59fa 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -437,7 +437,7 @@ static inline int __must_check sysfs_create_file(struct kobject *kobj, static inline void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { - return sysfs_remove_file_ns(kobj, attr, NULL); + sysfs_remove_file_ns(kobj, attr, NULL); } static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target, -- cgit v1.2.3 From 26fc9cd200ec839e0b3095e05ae018f27314e7aa Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Sat, 26 Apr 2014 15:40:28 +0800 Subject: kernfs: move the last knowledge of sysfs out from kernfs There is still one residue of sysfs remaining: the sb_magic SYSFS_MAGIC. However this should be kernfs user specific, so this patch moves it out. Kerrnfs user should specify their magic number while mouting. 
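A minimal sketch of a kernfs user's mount callback with the magic number now passed in explicitly (foofs_root and FOOFS_MAGIC are illustrative assumptions; the real call sites updated below are sysfs and cgroup):

        static struct dentry *foofs_mount(struct file_system_type *fs_type,
                                          int flags, const char *dev_name,
                                          void *data)
        {
                bool new_sb;

                /* The caller, not kernfs, now supplies the superblock magic. */
                return kernfs_mount(fs_type, flags, foofs_root,
                                    FOOFS_MAGIC, &new_sb);
        }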
Signed-off-by: Jianyu Zhan Acked-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/mount.c | 11 ++++++----- fs/sysfs/mount.c | 4 +++- include/linux/kernfs.h | 13 ++++++++----- kernel/cgroup.c | 4 +++- 4 files changed, 20 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index f25a7c0c3cdc..d171b98a6cdd 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -62,7 +62,7 @@ struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) return NULL; } -static int kernfs_fill_super(struct super_block *sb) +static int kernfs_fill_super(struct super_block *sb, unsigned long magic) { struct kernfs_super_info *info = kernfs_info(sb); struct inode *inode; @@ -71,7 +71,7 @@ static int kernfs_fill_super(struct super_block *sb) info->sb = sb; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = SYSFS_MAGIC; + sb->s_magic = magic; sb->s_op = &kernfs_sops; sb->s_time_gran = 1; @@ -132,6 +132,7 @@ const void *kernfs_super_ns(struct super_block *sb) * @fs_type: file_system_type of the fs being mounted * @flags: mount flags specified for the mount * @root: kernfs_root of the hierarchy being mounted + * @magic: file system specific magic number * @new_sb_created: tell the caller if we allocated a new superblock * @ns: optional namespace tag of the mount * @@ -143,8 +144,8 @@ const void *kernfs_super_ns(struct super_block *sb) * The return value can be passed to the vfs layer verbatim. */ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, bool *new_sb_created, - const void *ns) + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns) { struct super_block *sb; struct kernfs_super_info *info; @@ -169,7 +170,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, if (!sb->s_root) { struct kernfs_super_info *info = kernfs_info(sb); - error = kernfs_fill_super(sb); + error = kernfs_fill_super(sb, magic); if (error) { deactivate_locked_super(sb); return ERR_PTR(error); diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 8794423f7efb..8a49486bf30c 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -13,6 +13,7 @@ #define DEBUG #include +#include #include #include #include @@ -38,7 +39,8 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, } ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); - root = kernfs_mount_ns(fs_type, flags, sysfs_root, &new_sb, ns); + root = kernfs_mount_ns(fs_type, flags, sysfs_root, + SYSFS_MAGIC, &new_sb, ns); if (IS_ERR(root) || !new_sb) kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); return root; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index c841688a78a3..17aa1cce6f8e 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -301,8 +301,8 @@ void kernfs_notify(struct kernfs_node *kn); const void *kernfs_super_ns(struct super_block *sb); struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, bool *new_sb_created, - const void *ns); + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns); void kernfs_kill_sb(struct super_block *sb); void kernfs_init(void); @@ -395,7 +395,8 @@ static inline const void *kernfs_super_ns(struct super_block *sb) static inline struct dentry * kernfs_mount_ns(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, bool *new_sb_created, const void *ns) + struct kernfs_root *root, unsigned long magic, + bool 
*new_sb_created, const void *ns) { return ERR_PTR(-ENOSYS); } static inline void kernfs_kill_sb(struct super_block *sb) { } @@ -453,9 +454,11 @@ static inline int kernfs_rename(struct kernfs_node *kn, static inline struct dentry * kernfs_mount(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, bool *new_sb_created) + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created) { - return kernfs_mount_ns(fs_type, flags, root, new_sb_created, NULL); + return kernfs_mount_ns(fs_type, flags, root, + magic, new_sb_created, NULL); } #endif /* __LINUX_KERNFS_H */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3f1ca934a237..ceee0c54c6a4 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1604,7 +1605,8 @@ out_unlock: if (ret) return ERR_PTR(ret); - dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb); + dentry = kernfs_mount(fs_type, flags, root->kf_root, + CGROUP_SUPER_MAGIC, &new_sb); if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); return dentry; -- cgit v1.2.3 From 2d53139f31626bad6f8983d8e519ddde2cbba921 Mon Sep 17 00:00:00 2001 From: David Mosberger Date: Mon, 28 Apr 2014 22:14:07 -0600 Subject: Add support for using a MAX3421E chip as a host driver. Signed-off-by: David Mosberger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/Makefile | 1 + drivers/usb/host/Kconfig | 11 + drivers/usb/host/Makefile | 1 + drivers/usb/host/max3421-hcd.c | 1937 +++++++++++++++++++++++++++++ include/linux/platform_data/max3421-hcd.h | 23 + 5 files changed, 1973 insertions(+) create mode 100644 drivers/usb/host/max3421-hcd.c create mode 100644 include/linux/platform_data/max3421-hcd.h (limited to 'include/linux') diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 1ae2bf39d84b..9bb672199703 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_USB_IMX21_HCD) += host/ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/ obj-$(CONFIG_USB_FUSBH200_HCD) += host/ obj-$(CONFIG_USB_FOTG210_HCD) += host/ +obj-$(CONFIG_USB_MAX3421_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 7a39ae86d5ce..52144c720a1d 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -342,6 +342,17 @@ config USB_FOTG210_HCD To compile this driver as a module, choose M here: the module will be called fotg210-hcd. +config USB_MAX3421_HCD + tristate "MAX3421 HCD (USB-over-SPI) support" + depends on USB && SPI + ---help--- + The Maxim MAX3421E chip supports standard USB 2.0-compliant + full-speed devices either in host or peripheral mode. This + driver supports the host-mode of the MAX3421E only. + + To compile this driver as a module, choose M here: the module will + be called max3421-hcd. 
+ config USB_OHCI_HCD tristate "OHCI HCD (USB 1.1) support" select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 7530468c9a4f..ea2bec52a4fb 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -70,3 +70,4 @@ obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o +obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c new file mode 100644 index 000000000000..dfc74d6738db --- /dev/null +++ b/drivers/usb/host/max3421-hcd.c @@ -0,0 +1,1937 @@ +/* + * MAX3421 Host Controller driver for USB. + * + * Author: David Mosberger-Tang + * + * (C) Copyright 2014 David Mosberger-Tang + * + * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host + * controller on a SPI bus. + * + * Based on: + * o MAX3421E datasheet + * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf + * o MAX3421E Programming Guide + * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf + * o gadget/dummy_hcd.c + * For USB HCD implementation. + * o Arduino MAX3421 driver + * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp + * + * This file is licenced under the GPL v2. + * + * Important note on worst-case (full-speed) packet size constraints + * (See USB 2.0 Section 5.6.3 and following): + * + * - control: 64 bytes + * - isochronous: 1023 bytes + * - interrupt: 64 bytes + * - bulk: 64 bytes + * + * Since the MAX3421 FIFO size is 64 bytes, we do not have to work about + * multi-FIFO writes/reads for a single USB packet *except* for isochronous + * transfers. We don't support isochronous transfers at this time, so we + * just assume that a USB packet always fits into a single FIFO buffer. + * + * NOTE: The June 2006 version of "MAX3421E Programming Guide" + * (AN3785) has conflicting info for the RCVDAVIRQ bit: + * + * The description of RCVDAVIRQ says "The CPU *must* clear + * this IRQ bit (by writing a 1 to it) before reading the + * RCVFIFO data. + * + * However, the earlier section on "Programming BULK-IN + * Transfers" says * that: + * + * After the CPU retrieves the data, it clears the + * RCVDAVIRQ bit. + * + * The December 2006 version has been corrected and it consistently + * states the second behavior is the correct one. + * + * Synchronous SPI transactions sleep so we can't perform any such + * transactions while holding a spin-lock (and/or while interrupts are + * masked). To achieve this, all SPI transactions are issued from a + * single thread (max3421_spi_thread). + */ + +#include +#include +#include +#include + +#include + +#define DRIVER_DESC "MAX3421 USB Host-Controller Driver" +#define DRIVER_VERSION "1.0" + +/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */ +#define USB_MAX_FRAME_NUMBER 0x7ff +#define USB_MAX_RETRIES 3 /* # of retries before error is reported */ + +/* + * Max. # of times we're willing to retransmit a request immediately in + * resposne to a NAK. Afterwards, we fall back on trying once a frame. 
+ */ +#define NAK_MAX_FAST_RETRANSMITS 2 + +#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ + +/* Port-change mask: */ +#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \ + USB_PORT_STAT_C_ENABLE | \ + USB_PORT_STAT_C_SUSPEND | \ + USB_PORT_STAT_C_OVERCURRENT | \ + USB_PORT_STAT_C_RESET) << 16) + +enum max3421_rh_state { + MAX3421_RH_RESET, + MAX3421_RH_SUSPENDED, + MAX3421_RH_RUNNING +}; + +enum pkt_state { + PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */ + PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */ + PKT_STATE_TERMINATE /* waiting to terminate control transfer */ +}; + +enum scheduling_pass { + SCHED_PASS_PERIODIC, + SCHED_PASS_NON_PERIODIC, + SCHED_PASS_DONE +}; + +struct max3421_hcd { + spinlock_t lock; + + struct task_struct *spi_thread; + + struct max3421_hcd *next; + + enum max3421_rh_state rh_state; + /* lower 16 bits contain port status, upper 16 bits the change mask: */ + u32 port_status; + + unsigned active:1; + + struct list_head ep_list; /* list of EP's with work */ + + /* + * The following are owned by spi_thread (may be accessed by + * SPI-thread without acquiring the HCD lock: + */ + u8 rev; /* chip revision */ + u16 frame_number; + /* + * URB we're currently processing. Must not be reset to NULL + * unless MAX3421E chip is idle: + */ + struct urb *curr_urb; + enum scheduling_pass sched_pass; + struct usb_device *loaded_dev; /* dev that's loaded into the chip */ + int loaded_epnum; /* epnum whose toggles are loaded */ + int urb_done; /* > 0 -> no errors, < 0: errno */ + size_t curr_len; + u8 hien; + u8 mode; + u8 iopins[2]; + unsigned int do_enable_irq:1; + unsigned int do_reset_hcd:1; + unsigned int do_reset_port:1; + unsigned int do_check_unlink:1; + unsigned int do_iopin_update:1; +#ifdef DEBUG + unsigned long err_stat[16]; +#endif +}; + +struct max3421_ep { + struct usb_host_endpoint *ep; + struct list_head ep_list; + u32 naks; + u16 last_active; /* frame # this ep was last active */ + enum pkt_state pkt_state; + u8 retries; + u8 retransmit; /* packet needs retransmission */ +}; + +static struct max3421_hcd *max3421_hcd_list; + +#define MAX3421_FIFO_SIZE 64 + +#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */ +#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */ + +/* SPI commands: */ +#define MAX3421_SPI_DIR_SHIFT 1 +#define MAX3421_SPI_REG_SHIFT 3 + +#define MAX3421_REG_RCVFIFO 1 +#define MAX3421_REG_SNDFIFO 2 +#define MAX3421_REG_SUDFIFO 4 +#define MAX3421_REG_RCVBC 6 +#define MAX3421_REG_SNDBC 7 +#define MAX3421_REG_USBIRQ 13 +#define MAX3421_REG_USBIEN 14 +#define MAX3421_REG_USBCTL 15 +#define MAX3421_REG_CPUCTL 16 +#define MAX3421_REG_PINCTL 17 +#define MAX3421_REG_REVISION 18 +#define MAX3421_REG_IOPINS1 20 +#define MAX3421_REG_IOPINS2 21 +#define MAX3421_REG_GPINIRQ 22 +#define MAX3421_REG_GPINIEN 23 +#define MAX3421_REG_GPINPOL 24 +#define MAX3421_REG_HIRQ 25 +#define MAX3421_REG_HIEN 26 +#define MAX3421_REG_MODE 27 +#define MAX3421_REG_PERADDR 28 +#define MAX3421_REG_HCTL 29 +#define MAX3421_REG_HXFR 30 +#define MAX3421_REG_HRSL 31 + +enum { + MAX3421_USBIRQ_OSCOKIRQ_BIT = 0, + MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5, + MAX3421_USBIRQ_VBUSIRQ_BIT +}; + +enum { + MAX3421_CPUCTL_IE_BIT = 0, + MAX3421_CPUCTL_PULSEWID0_BIT = 6, + MAX3421_CPUCTL_PULSEWID1_BIT +}; + +enum { + MAX3421_USBCTL_PWRDOWN_BIT = 4, + MAX3421_USBCTL_CHIPRES_BIT +}; + +enum { + MAX3421_PINCTL_GPXA_BIT = 0, + MAX3421_PINCTL_GPXB_BIT, + MAX3421_PINCTL_POSINT_BIT, + MAX3421_PINCTL_INTLEVEL_BIT, + MAX3421_PINCTL_FDUPSPI_BIT, 
+ MAX3421_PINCTL_EP0INAK_BIT, + MAX3421_PINCTL_EP2INAK_BIT, + MAX3421_PINCTL_EP3INAK_BIT, +}; + +enum { + MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */ + MAX3421_HI_RWU_BIT, /* remote wakeup */ + MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */ + MAX3421_HI_SNDBAV_BIT, /* send buffer available */ + MAX3421_HI_SUSDN_BIT, /* suspend operation done */ + MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */ + MAX3421_HI_FRAME_BIT, /* frame generator */ + MAX3421_HI_HXFRDN_BIT, /* host transfer done */ +}; + +enum { + MAX3421_HCTL_BUSRST_BIT = 0, + MAX3421_HCTL_FRMRST_BIT, + MAX3421_HCTL_SAMPLEBUS_BIT, + MAX3421_HCTL_SIGRSM_BIT, + MAX3421_HCTL_RCVTOG0_BIT, + MAX3421_HCTL_RCVTOG1_BIT, + MAX3421_HCTL_SNDTOG0_BIT, + MAX3421_HCTL_SNDTOG1_BIT +}; + +enum { + MAX3421_MODE_HOST_BIT = 0, + MAX3421_MODE_LOWSPEED_BIT, + MAX3421_MODE_HUBPRE_BIT, + MAX3421_MODE_SOFKAENAB_BIT, + MAX3421_MODE_SEPIRQ_BIT, + MAX3421_MODE_DELAYISO_BIT, + MAX3421_MODE_DMPULLDN_BIT, + MAX3421_MODE_DPPULLDN_BIT +}; + +enum { + MAX3421_HRSL_OK = 0, + MAX3421_HRSL_BUSY, + MAX3421_HRSL_BADREQ, + MAX3421_HRSL_UNDEF, + MAX3421_HRSL_NAK, + MAX3421_HRSL_STALL, + MAX3421_HRSL_TOGERR, + MAX3421_HRSL_WRONGPID, + MAX3421_HRSL_BADBC, + MAX3421_HRSL_PIDERR, + MAX3421_HRSL_PKTERR, + MAX3421_HRSL_CRCERR, + MAX3421_HRSL_KERR, + MAX3421_HRSL_JERR, + MAX3421_HRSL_TIMEOUT, + MAX3421_HRSL_BABBLE, + MAX3421_HRSL_RESULT_MASK = 0xf, + MAX3421_HRSL_RCVTOGRD_BIT = 4, + MAX3421_HRSL_SNDTOGRD_BIT, + MAX3421_HRSL_KSTATUS_BIT, + MAX3421_HRSL_JSTATUS_BIT +}; + +/* Return same error-codes as ohci.h:cc_to_error: */ +static const int hrsl_to_error[] = { + [MAX3421_HRSL_OK] = 0, + [MAX3421_HRSL_BUSY] = -EINVAL, + [MAX3421_HRSL_BADREQ] = -EINVAL, + [MAX3421_HRSL_UNDEF] = -EINVAL, + [MAX3421_HRSL_NAK] = -EAGAIN, + [MAX3421_HRSL_STALL] = -EPIPE, + [MAX3421_HRSL_TOGERR] = -EILSEQ, + [MAX3421_HRSL_WRONGPID] = -EPROTO, + [MAX3421_HRSL_BADBC] = -EREMOTEIO, + [MAX3421_HRSL_PIDERR] = -EPROTO, + [MAX3421_HRSL_PKTERR] = -EPROTO, + [MAX3421_HRSL_CRCERR] = -EILSEQ, + [MAX3421_HRSL_KERR] = -EIO, + [MAX3421_HRSL_JERR] = -EIO, + [MAX3421_HRSL_TIMEOUT] = -ETIME, + [MAX3421_HRSL_BABBLE] = -EOVERFLOW +}; + +/* + * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a + * reasonable overview of how control transfers use the the IN/OUT + * tokens. + */ +#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */ +#define MAX3421_HXFR_SETUP 0x10 +#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */ +#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep)) +#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep)) +#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */ +#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */ + +#define field(val, bit) ((val) << (bit)) + +static inline s16 +frame_diff(u16 left, u16 right) +{ + return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1); +} + +static inline struct max3421_hcd * +hcd_to_max3421(struct usb_hcd *hcd) +{ + return (struct max3421_hcd *) hcd->hcd_priv; +} + +static inline struct usb_hcd * +max3421_to_hcd(struct max3421_hcd *max3421_hcd) +{ + return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv); +} + +static u8 +spi_rd8(struct usb_hcd *hcd, unsigned int reg) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct spi_transfer transfer; + u8 tx_data[1]; + /* + * RX data must be in its own cache-line so it stays flushed + * from the cache until the transfer is complete. Otherwise, + * we get stale data from the cache. 
+ */ + u8 rx_data[SMP_CACHE_BYTES] ____cacheline_aligned; + struct spi_message msg; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + tx_data[0] = (field(reg, MAX3421_SPI_REG_SHIFT) | + field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT)); + + transfer.tx_buf = tx_data; + transfer.rx_buf = rx_data; + transfer.len = 2; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); + + return rx_data[1]; +} + +static void +spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct spi_transfer transfer; + struct spi_message msg; + u8 tx_data[2]; + + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + tx_data[0] = (field(reg, MAX3421_SPI_REG_SHIFT) | + field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT)); + tx_data[1] = val; + + transfer.tx_buf = tx_data; + transfer.len = 2; + + spi_message_add_tail(&transfer, &msg); + spi_sync(spi, &msg); +} + +static void +spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct spi_transfer transfer[2]; + struct spi_message msg; + u8 cmd; + + memset(transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + cmd = (field(reg, MAX3421_SPI_REG_SHIFT) | + field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT)); + + transfer[0].tx_buf = &cmd; + transfer[0].len = 1; + + transfer[1].rx_buf = buf; + transfer[1].len = len; + + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + spi_sync(spi, &msg); +} + +static void +spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct spi_transfer transfer[2]; + struct spi_message msg; + u8 cmd; + + memset(transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + + cmd = (field(reg, MAX3421_SPI_REG_SHIFT) | + field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT)); + + transfer[0].tx_buf = &cmd; + transfer[0].len = 1; + + transfer[1].tx_buf = buf; + transfer[1].len = len; + + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + spi_sync(spi, &msg); +} + +/* + * Figure out the correct setting for the LOWSPEED and HUBPRE mode + * bits. The HUBPRE bit needs to be set when MAX3421E operates at + * full speed, but it's talking to a low-speed device (i.e., through a + * hub). Setting that bit ensures that every low-speed packet is + * preceded by a full-speed PRE PID. Possible configurations: + * + * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit: + * FULL FULL => 0 0 + * FULL LOW => 1 1 + * LOW LOW => 1 0 + * LOW FULL => 1 0 + */ +static void +max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode; + + mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT); + mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT); + if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) { + mode |= mode_lowspeed; + mode &= ~mode_hubpre; + } else if (dev->speed == USB_SPEED_LOW) { + mode |= mode_lowspeed | mode_hubpre; + } else { + mode &= ~(mode_lowspeed | mode_hubpre); + } + if (mode != max3421_hcd->mode) { + max3421_hcd->mode = mode; + spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); + } + +} + +/* + * Caller must NOT hold HCD spinlock. 
+ */ +static void +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, + int force_toggles) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int old_epnum, same_ep, rcvtog, sndtog; + struct usb_device *old_dev; + u8 hctl; + + old_dev = max3421_hcd->loaded_dev; + old_epnum = max3421_hcd->loaded_epnum; + + same_ep = (dev == old_dev && epnum == old_epnum); + if (same_ep && !force_toggles) + return; + + if (old_dev && !same_ep) { + /* save the old end-points toggles: */ + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + + rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; + sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + + /* no locking: HCD (i.e., we) own toggles, don't we? */ + usb_settoggle(old_dev, old_epnum, 0, rcvtog); + usb_settoggle(old_dev, old_epnum, 1, sndtog); + } + /* setup new endpoint's toggle bits: */ + rcvtog = usb_gettoggle(dev, epnum, 0); + sndtog = usb_gettoggle(dev, epnum, 1); + hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | + BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); + + max3421_hcd->loaded_epnum = epnum; + spi_wr8(hcd, MAX3421_REG_HCTL, hctl); + + /* + * Note: devnum for one and the same device can change during + * address-assignment so it's best to just always load the + * address whenever the end-point changed/was forced. + */ + max3421_hcd->loaded_dev = dev; + spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); +} + +static int +max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb) +{ + spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8); + return MAX3421_HXFR_SETUP; +} + +static int +max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int epnum = usb_pipeendpoint(urb->pipe); + + max3421_hcd->curr_len = 0; + max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT); + return MAX3421_HXFR_BULK_IN(epnum); +} + +static int +max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int epnum = usb_pipeendpoint(urb->pipe); + u32 max_packet; + void *src; + + src = urb->transfer_buffer + urb->actual_length; + + if (fast_retransmit) { + if (max3421_hcd->rev == 0x12) { + /* work around rev 0x12 bug: */ + spi_wr8(hcd, MAX3421_REG_SNDBC, 0); + spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]); + spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len); + } + return MAX3421_HXFR_BULK_OUT(epnum); + } + + max_packet = usb_maxpacket(urb->dev, urb->pipe, 1); + + if (max_packet > MAX3421_FIFO_SIZE) { + /* + * We do not support isochronous transfers at this + * time. + */ + dev_err(&spi->dev, + "%s: packet-size of %u too big (limit is %u bytes)", + __func__, max_packet, MAX3421_FIFO_SIZE); + max3421_hcd->urb_done = -EMSGSIZE; + return -EMSGSIZE; + } + max3421_hcd->curr_len = min((urb->transfer_buffer_length - + urb->actual_length), max_packet); + + spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len); + spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len); + return MAX3421_HXFR_BULK_OUT(epnum); +} + +/* + * Issue the next host-transfer command. + * Caller must NOT hold HCD spinlock. 
+ */ +static void +max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb = max3421_hcd->curr_urb; + struct max3421_ep *max3421_ep = urb->ep->hcpriv; + int cmd = -EINVAL; + + if (!urb) + return; /* nothing to do */ + + switch (max3421_ep->pkt_state) { + case PKT_STATE_SETUP: + cmd = max3421_ctrl_setup(hcd, urb); + break; + + case PKT_STATE_TRANSFER: + if (usb_urb_dir_in(urb)) + cmd = max3421_transfer_in(hcd, urb); + else + cmd = max3421_transfer_out(hcd, urb, fast_retransmit); + break; + + case PKT_STATE_TERMINATE: + /* + * IN transfers are terminated with HS_OUT token, + * OUT transfers with HS_IN: + */ + if (usb_urb_dir_in(urb)) + cmd = MAX3421_HXFR_HS_OUT; + else + cmd = MAX3421_HXFR_HS_IN; + break; + } + + if (cmd < 0) + return; + + /* issue the command and wait for host-xfer-done interrupt: */ + + spi_wr8(hcd, MAX3421_REG_HXFR, cmd); + max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT); +} + +/* + * Find the next URB to process and start its execution. + * + * At this time, we do not anticipate ever connecting a USB hub to the + * MAX3421 chip, so at most USB device can be connected and we can use + * a simplistic scheduler: at the start of a frame, schedule all + * periodic transfers. Once that is done, use the remainder of the + * frame to process non-periodic (bulk & control) transfers. + * + * Preconditions: + * o Caller must NOT hold HCD spinlock. + * o max3421_hcd->curr_urb MUST BE NULL. + * o MAX3421E chip must be idle. + */ +static int +max3421_select_and_start_urb(struct usb_hcd *hcd) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb, *curr_urb = NULL; + struct max3421_ep *max3421_ep; + int epnum, force_toggles = 0; + struct usb_host_endpoint *ep; + struct list_head *pos; + unsigned long flags; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + for (; + max3421_hcd->sched_pass < SCHED_PASS_DONE; + ++max3421_hcd->sched_pass) + list_for_each(pos, &max3421_hcd->ep_list) { + urb = NULL; + max3421_ep = container_of(pos, struct max3421_ep, + ep_list); + ep = max3421_ep->ep; + + switch (usb_endpoint_type(&ep->desc)) { + case USB_ENDPOINT_XFER_ISOC: + case USB_ENDPOINT_XFER_INT: + if (max3421_hcd->sched_pass != + SCHED_PASS_PERIODIC) + continue; + break; + + case USB_ENDPOINT_XFER_CONTROL: + case USB_ENDPOINT_XFER_BULK: + if (max3421_hcd->sched_pass != + SCHED_PASS_NON_PERIODIC) + continue; + break; + } + + if (list_empty(&ep->urb_list)) + continue; /* nothing to do */ + urb = list_first_entry(&ep->urb_list, struct urb, + urb_list); + if (urb->unlinked) { + dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", + __func__, urb, urb->unlinked); + max3421_hcd->curr_urb = urb; + max3421_hcd->urb_done = 1; + spin_unlock_irqrestore(&max3421_hcd->lock, + flags); + return 1; + } + + switch (usb_endpoint_type(&ep->desc)) { + case USB_ENDPOINT_XFER_CONTROL: + /* + * Allow one control transaction per + * frame per endpoint: + */ + if (frame_diff(max3421_ep->last_active, + max3421_hcd->frame_number) == 0) + continue; + break; + + case USB_ENDPOINT_XFER_BULK: + if (max3421_ep->retransmit + && (frame_diff(max3421_ep->last_active, + max3421_hcd->frame_number) + == 0)) + /* + * We already tried this EP + * during this frame and got a + * NAK or error; wait for next frame + */ + continue; + break; + + case USB_ENDPOINT_XFER_ISOC: + case USB_ENDPOINT_XFER_INT: + if (frame_diff(max3421_hcd->frame_number, + max3421_ep->last_active) + < 
urb->interval) + /* + * We already processed this + * end-point in the current + * frame + */ + continue; + break; + } + + /* move current ep to tail: */ + list_move_tail(pos, &max3421_hcd->ep_list); + curr_urb = urb; + goto done; + } +done: + if (!curr_urb) { + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return 0; + } + + urb = max3421_hcd->curr_urb = curr_urb; + epnum = usb_endpoint_num(&urb->ep->desc); + if (max3421_ep->retransmit) + /* restart (part of) a USB transaction: */ + max3421_ep->retransmit = 0; + else { + /* start USB transaction: */ + if (usb_endpoint_xfer_control(&ep->desc)) { + /* + * See USB 2.0 spec section 8.6.1 + * Initialization via SETUP Token: + */ + usb_settoggle(urb->dev, epnum, 0, 1); + usb_settoggle(urb->dev, epnum, 1, 1); + max3421_ep->pkt_state = PKT_STATE_SETUP; + force_toggles = 1; + } else + max3421_ep->pkt_state = PKT_STATE_TRANSFER; + } + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + + max3421_ep->last_active = max3421_hcd->frame_number; + max3421_set_address(hcd, urb->dev, epnum, force_toggles); + max3421_set_speed(hcd, urb->dev); + max3421_next_transfer(hcd, 0); + return 1; +} + +/* + * Check all endpoints for URBs that got unlinked. + * + * Caller must NOT hold HCD spinlock. + */ +static int +max3421_check_unlink(struct usb_hcd *hcd) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct list_head *pos, *upos, *next_upos; + struct max3421_ep *max3421_ep; + struct usb_host_endpoint *ep; + struct urb *urb; + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + list_for_each(pos, &max3421_hcd->ep_list) { + max3421_ep = container_of(pos, struct max3421_ep, ep_list); + ep = max3421_ep->ep; + list_for_each_safe(upos, next_upos, &ep->urb_list) { + urb = container_of(upos, struct urb, urb_list); + if (urb->unlinked) { + retval = 1; + dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", + __func__, urb, urb->unlinked); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&max3421_hcd->lock, + flags); + usb_hcd_giveback_urb(hcd, urb, 0); + spin_lock_irqsave(&max3421_hcd->lock, flags); + } + } + } + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return retval; +} + +/* + * Caller must NOT hold HCD spinlock. + */ +static void +max3421_slow_retransmit(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb = max3421_hcd->curr_urb; + struct max3421_ep *max3421_ep; + + max3421_ep = urb->ep->hcpriv; + max3421_ep->retransmit = 1; + max3421_hcd->curr_urb = NULL; +} + +/* + * Caller must NOT hold HCD spinlock. 
+ */ +static void +max3421_recv_data_available(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb = max3421_hcd->curr_urb; + size_t remaining, transfer_size; + u8 rcvbc; + + rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC); + + if (rcvbc > MAX3421_FIFO_SIZE) + rcvbc = MAX3421_FIFO_SIZE; + if (urb->actual_length >= urb->transfer_buffer_length) + remaining = 0; + else + remaining = urb->transfer_buffer_length - urb->actual_length; + transfer_size = rcvbc; + if (transfer_size > remaining) + transfer_size = remaining; + if (transfer_size > 0) { + void *dst = urb->transfer_buffer + urb->actual_length; + + spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size); + urb->actual_length += transfer_size; + max3421_hcd->curr_len = transfer_size; + } + + /* ack the RCVDAV irq now that the FIFO has been read: */ + spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT)); +} + +static void +max3421_handle_error(struct usb_hcd *hcd, u8 hrsl) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK; + struct urb *urb = max3421_hcd->curr_urb; + struct max3421_ep *max3421_ep = urb->ep->hcpriv; + int switch_sndfifo; + + /* + * If an OUT command results in any response other than OK + * (i.e., error or NAK), we have to perform a dummy-write to + * SNDBC so the FIFO gets switched back to us. Otherwise, we + * get out of sync with the SNDFIFO double buffer. + */ + switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER && + usb_urb_dir_out(urb)); + + switch (result_code) { + case MAX3421_HRSL_OK: + return; /* this shouldn't happen */ + + case MAX3421_HRSL_WRONGPID: /* received wrong PID */ + case MAX3421_HRSL_BUSY: /* SIE busy */ + case MAX3421_HRSL_BADREQ: /* bad val in HXFR */ + case MAX3421_HRSL_UNDEF: /* reserved */ + case MAX3421_HRSL_KERR: /* K-state instead of response */ + case MAX3421_HRSL_JERR: /* J-state instead of response */ + /* + * packet experienced an error that we cannot recover + * from; report error + */ + max3421_hcd->urb_done = hrsl_to_error[result_code]; + dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", + __func__, hrsl); + break; + + case MAX3421_HRSL_TOGERR: + if (usb_urb_dir_in(urb)) + ; /* don't do anything (device will switch toggle) */ + else { + /* flip the send toggle bit: */ + int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + + sndtog ^= 1; + spi_wr8(hcd, MAX3421_REG_HCTL, + BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); + } + /* FALL THROUGH */ + case MAX3421_HRSL_BADBC: /* bad byte count */ + case MAX3421_HRSL_PIDERR: /* received PID is corrupted */ + case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */ + case MAX3421_HRSL_CRCERR: /* CRC error */ + case MAX3421_HRSL_BABBLE: /* device talked too long */ + case MAX3421_HRSL_TIMEOUT: + if (max3421_ep->retries++ < USB_MAX_RETRIES) + /* retry the packet again in the next frame */ + max3421_slow_retransmit(hcd); + else { + /* Based on ohci.h cc_to_err[]: */ + max3421_hcd->urb_done = hrsl_to_error[result_code]; + dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", + __func__, hrsl); + } + break; + + case MAX3421_HRSL_STALL: + dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", + __func__, hrsl); + max3421_hcd->urb_done = hrsl_to_error[result_code]; + break; + + case MAX3421_HRSL_NAK: + /* + * Device wasn't ready for data or has no data + * available: retry the packet again. 
+ */ + if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) { + max3421_next_transfer(hcd, 1); + switch_sndfifo = 0; + } else + max3421_slow_retransmit(hcd); + break; + } + if (switch_sndfifo) + spi_wr8(hcd, MAX3421_REG_SNDBC, 0); +} + +/* + * Caller must NOT hold HCD spinlock. + */ +static int +max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + u32 max_packet; + + if (urb->actual_length >= urb->transfer_buffer_length) + return 1; /* read is complete, so we're done */ + + /* + * USB 2.0 Section 5.3.2 Pipes: packets must be full size + * except for last one. + */ + max_packet = usb_maxpacket(urb->dev, urb->pipe, 0); + if (max_packet > MAX3421_FIFO_SIZE) { + /* + * We do not support isochronous transfers at this + * time... + */ + dev_err(&spi->dev, + "%s: packet-size of %u too big (limit is %u bytes)", + __func__, max_packet, MAX3421_FIFO_SIZE); + return -EINVAL; + } + + if (max3421_hcd->curr_len < max_packet) { + if (urb->transfer_flags & URB_SHORT_NOT_OK) { + /* + * remaining > 0 and received an + * unexpected partial packet -> + * error + */ + return -EREMOTEIO; + } else + /* short read, but it's OK */ + return 1; + } + return 0; /* not done */ +} + +/* + * Caller must NOT hold HCD spinlock. + */ +static int +max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + + urb->actual_length += max3421_hcd->curr_len; + if (urb->actual_length < urb->transfer_buffer_length) + return 0; + if (urb->transfer_flags & URB_ZERO_PACKET) { + /* + * Some hardware needs a zero-size packet at the end + * of a bulk-out transfer if the last transfer was a + * full-sized packet (i.e., such hardware use < + * max_packet as an indicator that the end of the + * packet has been reached). + */ + u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1); + + if (max3421_hcd->curr_len == max_packet) + return 0; + } + return 1; +} + +/* + * Caller must NOT hold HCD spinlock. 
+ */ +static void +max3421_host_transfer_done(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb = max3421_hcd->curr_urb; + struct max3421_ep *max3421_ep; + u8 result_code, hrsl; + int urb_done = 0; + + max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) | + BIT(MAX3421_HI_RCVDAV_BIT)); + + hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + result_code = hrsl & MAX3421_HRSL_RESULT_MASK; + +#ifdef DEBUG + ++max3421_hcd->err_stat[result_code]; +#endif + + max3421_ep = urb->ep->hcpriv; + + if (unlikely(result_code != MAX3421_HRSL_OK)) { + max3421_handle_error(hcd, hrsl); + return; + } + + max3421_ep->naks = 0; + max3421_ep->retries = 0; + switch (max3421_ep->pkt_state) { + + case PKT_STATE_SETUP: + if (urb->transfer_buffer_length > 0) + max3421_ep->pkt_state = PKT_STATE_TRANSFER; + else + max3421_ep->pkt_state = PKT_STATE_TERMINATE; + break; + + case PKT_STATE_TRANSFER: + if (usb_urb_dir_in(urb)) + urb_done = max3421_transfer_in_done(hcd, urb); + else + urb_done = max3421_transfer_out_done(hcd, urb); + if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) { + /* + * We aren't really done - we still need to + * terminate the control transfer: + */ + max3421_hcd->urb_done = urb_done = 0; + max3421_ep->pkt_state = PKT_STATE_TERMINATE; + } + break; + + case PKT_STATE_TERMINATE: + urb_done = 1; + break; + } + + if (urb_done) + max3421_hcd->urb_done = urb_done; + else + max3421_next_transfer(hcd, 0); +} + +/* + * Caller must NOT hold HCD spinlock. + */ +static void +max3421_detect_conn(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + unsigned int jk, have_conn = 0; + u32 old_port_status, chg; + unsigned long flags; + u8 hrsl, mode; + + hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + + jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) | + (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1)); + + mode = max3421_hcd->mode; + + switch (jk) { + case 0x0: /* SE0: disconnect */ + /* + * Turn off SOFKAENAB bit to avoid getting interrupt + * every milli-second: + */ + mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT); + break; + + case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */ + case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */ + if (jk == 0x2) + /* need to switch to the other speed: */ + mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT); + /* turn on SOFKAENAB bit: */ + mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT); + have_conn = 1; + break; + + case 0x3: /* illegal */ + break; + } + + max3421_hcd->mode = mode; + spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); + + spin_lock_irqsave(&max3421_hcd->lock, flags); + old_port_status = max3421_hcd->port_status; + if (have_conn) + max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION; + else + max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION; + if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT)) + max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED; + else + max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED; + chg = (old_port_status ^ max3421_hcd->port_status); + max3421_hcd->port_status |= chg << 16; + spin_unlock_irqrestore(&max3421_hcd->lock, flags); +} + +static irqreturn_t +max3421_irq_handler(int irq, void *dev_id) +{ + struct usb_hcd *hcd = dev_id; + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + + if (max3421_hcd->spi_thread && + max3421_hcd->spi_thread->state != TASK_RUNNING) + wake_up_process(max3421_hcd->spi_thread); + if (!max3421_hcd->do_enable_irq) { + max3421_hcd->do_enable_irq = 1; + 
disable_irq_nosync(spi->irq); + } + return IRQ_HANDLED; +} + +#ifdef DEBUG + +static void +dump_eps(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct max3421_ep *max3421_ep; + struct usb_host_endpoint *ep; + struct list_head *pos, *upos; + char ubuf[512], *dp, *end; + unsigned long flags; + struct urb *urb; + int epnum, ret; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + list_for_each(pos, &max3421_hcd->ep_list) { + max3421_ep = container_of(pos, struct max3421_ep, ep_list); + ep = max3421_ep->ep; + + dp = ubuf; + end = dp + sizeof(ubuf); + *dp = '\0'; + list_for_each(upos, &ep->urb_list) { + urb = container_of(upos, struct urb, urb_list); + ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb, + usb_pipetype(urb->pipe), + usb_urb_dir_in(urb) ? "IN" : "OUT", + urb->actual_length, + urb->transfer_buffer_length); + if (ret < 0 || ret >= end - dp) + break; /* error or buffer full */ + dp += ret; + } + + epnum = usb_endpoint_num(&ep->desc); + pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n", + epnum, max3421_ep->pkt_state, max3421_ep->last_active, + max3421_ep->retries, max3421_ep->naks, + max3421_ep->retransmit, ubuf); + } + spin_unlock_irqrestore(&max3421_hcd->lock, flags); +} + +#endif /* DEBUG */ + +/* Return zero if no work was performed, 1 otherwise. */ +static int +max3421_handle_irqs(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + u32 chg, old_port_status; + unsigned long flags; + u8 hirq; + + /* + * Read and ack pending interrupts (CPU must never + * clear SNDBAV directly and RCVDAV must be cleared by + * max3421_recv_data_available()!): + */ + hirq = spi_rd8(hcd, MAX3421_REG_HIRQ); + hirq &= max3421_hcd->hien; + if (!hirq) + return 0; + + spi_wr8(hcd, MAX3421_REG_HIRQ, + hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) | + BIT(MAX3421_HI_RCVDAV_BIT))); + + if (hirq & BIT(MAX3421_HI_FRAME_BIT)) { + max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1) + & USB_MAX_FRAME_NUMBER); + max3421_hcd->sched_pass = SCHED_PASS_PERIODIC; + } + + if (hirq & BIT(MAX3421_HI_RCVDAV_BIT)) + max3421_recv_data_available(hcd); + + if (hirq & BIT(MAX3421_HI_HXFRDN_BIT)) + max3421_host_transfer_done(hcd); + + if (hirq & BIT(MAX3421_HI_CONDET_BIT)) + max3421_detect_conn(hcd); + + /* + * Now process interrupts that may affect HCD state + * other than the end-points: + */ + spin_lock_irqsave(&max3421_hcd->lock, flags); + + old_port_status = max3421_hcd->port_status; + if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) { + if (max3421_hcd->port_status & USB_PORT_STAT_RESET) { + /* BUSEVENT due to completion of Bus Reset */ + max3421_hcd->port_status &= ~USB_PORT_STAT_RESET; + max3421_hcd->port_status |= USB_PORT_STAT_ENABLE; + } else { + /* BUSEVENT due to completion of Bus Resume */ + pr_info("%s: BUSEVENT Bus Resume Done\n", __func__); + } + } + if (hirq & BIT(MAX3421_HI_RWU_BIT)) + pr_info("%s: RWU\n", __func__); + if (hirq & BIT(MAX3421_HI_SUSDN_BIT)) + pr_info("%s: SUSDN\n", __func__); + + chg = (old_port_status ^ max3421_hcd->port_status); + max3421_hcd->port_status |= chg << 16; + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + +#ifdef DEBUG + { + static unsigned long last_time; + char sbuf[16 * 16], *dp, *end; + int i; + + if (jiffies - last_time > 5*HZ) { + dp = sbuf; + end = sbuf + sizeof(sbuf); + *dp = '\0'; + for (i = 0; i < 16; ++i) { + int ret = snprintf(dp, end - dp, " %lu", + max3421_hcd->err_stat[i]); + if (ret < 0 || ret >= end - dp) + break; /* error or buffer full */ + dp += ret; + } + pr_info("%s: hrsl_stats 
%s\n", __func__, sbuf); + memset(max3421_hcd->err_stat, 0, + sizeof(max3421_hcd->err_stat)); + last_time = jiffies; + + dump_eps(hcd); + } + } +#endif + return 1; +} + +static int +max3421_reset_hcd(struct usb_hcd *hcd) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int timeout; + + /* perform a chip reset and wait for OSCIRQ signal to appear: */ + spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT)); + /* clear reset: */ + spi_wr8(hcd, MAX3421_REG_USBCTL, 0); + timeout = 1000; + while (1) { + if (spi_rd8(hcd, MAX3421_REG_USBIRQ) + & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT)) + break; + if (--timeout < 0) { + dev_err(&spi->dev, + "timed out waiting for oscillator OK signal"); + return 1; + } + cond_resched(); + } + + /* + * Turn on host mode, automatic generation of SOF packets, and + * enable pull-down registers on DM/DP: + */ + max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) | + BIT(MAX3421_MODE_SOFKAENAB_BIT) | + BIT(MAX3421_MODE_DMPULLDN_BIT) | + BIT(MAX3421_MODE_DPPULLDN_BIT)); + spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); + + /* reset frame-number: */ + max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER; + spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT)); + + /* sample the state of the D+ and D- lines */ + spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT)); + max3421_detect_conn(hcd); + + /* enable frame, connection-detected, and bus-event interrupts: */ + max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) | + BIT(MAX3421_HI_CONDET_BIT) | + BIT(MAX3421_HI_BUSEVENT_BIT)); + spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien); + + /* enable interrupts: */ + spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT)); + return 1; +} + +static int +max3421_urb_done(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + unsigned long flags; + struct urb *urb; + int status; + + status = max3421_hcd->urb_done; + max3421_hcd->urb_done = 0; + if (status > 0) + status = 0; + urb = max3421_hcd->curr_urb; + if (urb) { + max3421_hcd->curr_urb = NULL; + spin_lock_irqsave(&max3421_hcd->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + + /* must be called without the HCD spinlock: */ + usb_hcd_giveback_urb(hcd, urb, status); + } + return 1; +} + +static int +max3421_spi_thread(void *dev_id) +{ + struct usb_hcd *hcd = dev_id; + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int i, i_worked = 1; + + /* set full-duplex SPI mode, low-active interrupt pin: */ + spi_wr8(hcd, MAX3421_REG_PINCTL, + (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */ + BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */ + + while (!kthread_should_stop()) { + max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION); + if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13) + break; + dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev); + msleep(10000); + } + dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n", + max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word, + spi->irq); + + while (!kthread_should_stop()) { + if (!i_worked) { + /* + * We'll be waiting for wakeups from the hard + * interrupt handler, so now is a good time to + * sync our hien with the chip: + */ + spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien); + + set_current_state(TASK_INTERRUPTIBLE); + if (max3421_hcd->do_enable_irq) { + max3421_hcd->do_enable_irq = 0; + 
enable_irq(spi->irq); + } + schedule(); + __set_current_state(TASK_RUNNING); + } + + i_worked = 0; + + if (max3421_hcd->urb_done) + i_worked |= max3421_urb_done(hcd); + else if (max3421_handle_irqs(hcd)) + i_worked = 1; + else if (!max3421_hcd->curr_urb) + i_worked |= max3421_select_and_start_urb(hcd); + + if (max3421_hcd->do_reset_hcd) { + /* reset the HCD: */ + max3421_hcd->do_reset_hcd = 0; + i_worked |= max3421_reset_hcd(hcd); + } + if (max3421_hcd->do_reset_port) { + /* perform a USB bus reset: */ + max3421_hcd->do_reset_port = 0; + spi_wr8(hcd, MAX3421_REG_HCTL, + BIT(MAX3421_HCTL_BUSRST_BIT)); + i_worked = 1; + } + if (max3421_hcd->do_check_unlink) { + max3421_hcd->do_check_unlink = 0; + i_worked |= max3421_check_unlink(hcd); + } + if (max3421_hcd->do_iopin_update) { + /* + * IOPINS1/IOPINS2 do not auto-increment, so we can't + * use spi_wr_buf(). + */ + for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) { + u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1); + + val = ((val & 0xf0) | + (max3421_hcd->iopins[i] & 0x0f)); + spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val); + max3421_hcd->iopins[i] = val; + } + max3421_hcd->do_iopin_update = 0; + i_worked = 1; + } + } + set_current_state(TASK_RUNNING); + dev_info(&spi->dev, "SPI thread exiting"); + return 0; +} + +static int +max3421_reset_port(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + + max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE | + USB_PORT_STAT_LOW_SPEED); + max3421_hcd->do_reset_port = 1; + wake_up_process(max3421_hcd->spi_thread); + return 0; +} + +static int +max3421_reset(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + + hcd->self.sg_tablesize = 0; + hcd->speed = HCD_USB2; + hcd->self.root_hub->speed = USB_SPEED_FULL; + max3421_hcd->do_reset_hcd = 1; + wake_up_process(max3421_hcd->spi_thread); + return 0; +} + +static int +max3421_start(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + + spin_lock_init(&max3421_hcd->lock); + max3421_hcd->rh_state = MAX3421_RH_RUNNING; + + INIT_LIST_HEAD(&max3421_hcd->ep_list); + + hcd->power_budget = POWER_BUDGET; + hcd->state = HC_STATE_RUNNING; + hcd->uses_new_polling = 1; + return 0; +} + +static void +max3421_stop(struct usb_hcd *hcd) +{ +} + +static int +max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct max3421_ep *max3421_ep; + unsigned long flags; + int retval; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_INTERRUPT: + case PIPE_ISOCHRONOUS: + if (urb->interval < 0) { + dev_err(&spi->dev, + "%s: interval=%d for intr-/iso-pipe; expected > 0\n", + __func__, urb->interval); + return -EINVAL; + } + default: + break; + } + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + max3421_ep = urb->ep->hcpriv; + if (!max3421_ep) { + /* gets freed in max3421_endpoint_disable: */ + max3421_ep = kzalloc(sizeof(struct max3421_ep), mem_flags); + if (!max3421_ep) + return -ENOMEM; + max3421_ep->ep = urb->ep; + max3421_ep->last_active = max3421_hcd->frame_number; + urb->ep->hcpriv = max3421_ep; + + list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list); + } + + retval = usb_hcd_link_urb_to_ep(hcd, urb); + if (retval == 0) { + /* Since we added to the queue, restart scheduling: */ + max3421_hcd->sched_pass = SCHED_PASS_PERIODIC; + wake_up_process(max3421_hcd->spi_thread); + } + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return 
retval; +} + +static int +max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + unsigned long flags; + int retval; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + /* + * This will set urb->unlinked which in turn causes the entry + * to be dropped at the next opportunity. + */ + retval = usb_hcd_check_unlink_urb(hcd, urb, status); + if (retval == 0) { + max3421_hcd->do_check_unlink = 1; + wake_up_process(max3421_hcd->spi_thread); + } + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return retval; +} + +static void +max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + unsigned long flags; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + if (ep->hcpriv) { + struct max3421_ep *max3421_ep = ep->hcpriv; + + /* remove myself from the ep_list: */ + if (!list_empty(&max3421_ep->ep_list)) + list_del(&max3421_ep->ep_list); + kfree(max3421_ep); + ep->hcpriv = NULL; + } + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); +} + +static int +max3421_get_frame_number(struct usb_hcd *hcd) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + return max3421_hcd->frame_number; +} + +/* + * Should return a non-zero value when any port is undergoing a resume + * transition while the root hub is suspended. + */ +static int +max3421_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + if (!HCD_HW_ACCESSIBLE(hcd)) + goto done; + + *buf = 0; + if ((max3421_hcd->port_status & PORT_C_MASK) != 0) { + *buf = (1 << 1); /* a hub over-current condition exists */ + dev_dbg(hcd->self.controller, + "port status 0x%08x has changes\n", + max3421_hcd->port_status); + retval = 1; + if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + } +done: + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return retval; +} + +static inline void +hub_descriptor(struct usb_hub_descriptor *desc) +{ + memset(desc, 0, sizeof(*desc)); + /* + * See Table 11-13: Hub Descriptor in USB 2.0 spec. + */ + desc->bDescriptorType = 0x29; /* hub descriptor */ + desc->bDescLength = 9; + desc->wHubCharacteristics = cpu_to_le16(0x0001); + desc->bNbrPorts = 1; +} + +/* + * Set the MAX3421E general-purpose output with number PIN_NUMBER to + * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For + * any other value, this function acts as a no-op. 
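+ *
+ * The new value is only cached in max3421_hcd->iopins[] here: this
+ * function can be called with the HCD spinlock held, where the
+ * (sleeping) SPI transfer needed to update the chip's IOPINS registers
+ * is not allowed.  Setting do_iopin_update makes the SPI thread write
+ * the cached value out to the chip.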
+ */ +static void +max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) +{ + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + u8 mask, idx; + + --pin_number; + if (pin_number > 7) + return; + + mask = 1u << pin_number; + idx = pin_number / 4; + + if (value) + max3421_hcd->iopins[idx] |= mask; + else + max3421_hcd->iopins[idx] &= ~mask; + max3421_hcd->do_iopin_update = 1; + wake_up_process(max3421_hcd->spi_thread); +} + +static int +max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index, + char *buf, u16 length) +{ + struct spi_device *spi = to_spi_device(hcd->self.controller); + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct max3421_hcd_platform_data *pdata; + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + pdata = spi->dev.platform_data; + + switch (type_req) { + case ClearHubFeature: + break; + case ClearPortFeature: + switch (value) { + case USB_PORT_FEAT_SUSPEND: + break; + case USB_PORT_FEAT_POWER: + dev_dbg(hcd->self.controller, "power-off\n"); + max3421_gpout_set_value(hcd, pdata->vbus_gpout, 0); + /* FALLS THROUGH */ + default: + max3421_hcd->port_status &= ~(1 << value); + } + break; + case GetHubDescriptor: + hub_descriptor((struct usb_hub_descriptor *) buf); + break; + + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: + case GetPortErrorCount: + case SetHubDepth: + /* USB3 only */ + goto error; + + case GetHubStatus: + *(__le32 *) buf = cpu_to_le32(0); + break; + + case GetPortStatus: + if (index != 1) { + retval = -EPIPE; + goto error; + } + ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status); + ((__le16 *) buf)[1] = + cpu_to_le16(max3421_hcd->port_status >> 16); + break; + + case SetHubFeature: + retval = -EPIPE; + break; + + case SetPortFeature: + switch (value) { + case USB_PORT_FEAT_LINK_STATE: + case USB_PORT_FEAT_U1_TIMEOUT: + case USB_PORT_FEAT_U2_TIMEOUT: + case USB_PORT_FEAT_BH_PORT_RESET: + goto error; + case USB_PORT_FEAT_SUSPEND: + if (max3421_hcd->active) + max3421_hcd->port_status |= + USB_PORT_STAT_SUSPEND; + break; + case USB_PORT_FEAT_POWER: + dev_dbg(hcd->self.controller, "power-on\n"); + max3421_hcd->port_status |= USB_PORT_STAT_POWER; + max3421_gpout_set_value(hcd, pdata->vbus_gpout, 1); + break; + case USB_PORT_FEAT_RESET: + max3421_reset_port(hcd); + /* FALLS THROUGH */ + default: + if ((max3421_hcd->port_status & USB_PORT_STAT_POWER) + != 0) + max3421_hcd->port_status |= (1 << value); + } + break; + + default: + dev_dbg(hcd->self.controller, + "hub control req%04x v%04x i%04x l%d\n", + type_req, value, index, length); +error: /* "protocol stall" on error */ + retval = -EPIPE; + } + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + return retval; +} + +static int +max3421_bus_suspend(struct usb_hcd *hcd) +{ + return -1; +} + +static int +max3421_bus_resume(struct usb_hcd *hcd) +{ + return -1; +} + +/* + * The SPI driver already takes care of DMA-mapping/unmapping, so no + * reason to do it twice. 
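+ * Returning 0 from the map hook reports success without creating a
+ * mapping, and the matching no-op unmap hook keeps the core from
+ * trying to tear one down.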
+ */ +static int +max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + return 0; +} + +static void +max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ +} + +static struct hc_driver max3421_hcd_desc = { + .description = "max3421", + .product_desc = DRIVER_DESC, + .hcd_priv_size = sizeof(struct max3421_hcd), + .flags = HCD_USB11, + .reset = max3421_reset, + .start = max3421_start, + .stop = max3421_stop, + .get_frame_number = max3421_get_frame_number, + .urb_enqueue = max3421_urb_enqueue, + .urb_dequeue = max3421_urb_dequeue, + .map_urb_for_dma = max3421_map_urb_for_dma, + .unmap_urb_for_dma = max3421_unmap_urb_for_dma, + .endpoint_disable = max3421_endpoint_disable, + .hub_status_data = max3421_hub_status_data, + .hub_control = max3421_hub_control, + .bus_suspend = max3421_bus_suspend, + .bus_resume = max3421_bus_resume, +}; + +static int +max3421_probe(struct spi_device *spi) +{ + struct max3421_hcd *max3421_hcd; + struct usb_hcd *hcd; + int retval; + + if (spi_setup(spi) < 0) { + dev_err(&spi->dev, "Unable to setup SPI bus"); + return -EFAULT; + } + + hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev, + dev_name(&spi->dev)); + if (!hcd) { + dev_err(&spi->dev, "failed to create HCD structure\n"); + return -ENOMEM; + } + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + max3421_hcd = hcd_to_max3421(hcd); + max3421_hcd->next = max3421_hcd_list; + max3421_hcd_list = max3421_hcd; + INIT_LIST_HEAD(&max3421_hcd->ep_list); + + max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd, + "max3421_spi_thread"); + if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) { + dev_err(&spi->dev, + "failed to create SPI thread (out of memory)\n"); + return -ENOMEM; + } + + retval = usb_add_hcd(hcd, 0, 0); + if (retval) { + dev_err(&spi->dev, "failed to add HCD\n"); + usb_put_hcd(hcd); + return retval; + } + + retval = request_irq(spi->irq, max3421_irq_handler, + IRQF_TRIGGER_LOW, "max3421", hcd); + if (retval < 0) { + usb_put_hcd(hcd); + dev_err(&spi->dev, "failed to request irq %d\n", spi->irq); + return retval; + } + return 0; +} + +static int +max3421_remove(struct spi_device *spi) +{ + struct max3421_hcd *max3421_hcd = NULL, **prev; + struct usb_hcd *hcd = NULL; + unsigned long flags; + + for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) { + max3421_hcd = *prev; + hcd = max3421_to_hcd(max3421_hcd); + if (hcd->self.controller == &spi->dev) + break; + } + if (!max3421_hcd) { + dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n", + spi); + return -ENODEV; + } + + usb_remove_hcd(hcd); + + spin_lock_irqsave(&max3421_hcd->lock, flags); + + kthread_stop(max3421_hcd->spi_thread); + *prev = max3421_hcd->next; + + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + + free_irq(spi->irq, hcd); + + usb_put_hcd(hcd); + return 0; +} + +static struct spi_driver max3421_driver = { + .probe = max3421_probe, + .remove = max3421_remove, + .driver = { + .name = "max3421-hcd", + .owner = THIS_MODULE, + }, +}; + +static int __init +max3421_mod_init(void) +{ + return spi_register_driver(&max3421_driver); +} + +static void __exit +max3421_mod_exit(void) +{ + spi_unregister_driver(&max3421_driver); +} + +module_init(max3421_mod_init); +module_exit(max3421_mod_exit); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("David Mosberger "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h new file mode 100644 index 000000000000..4ad459605d87 --- /dev/null +++ 
b/include/linux/platform_data/max3421-hcd.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2014 eGauge Systems LLC + * Contributed by David Mosberger-Tang + * + * Platform-data structure for MAX3421 USB HCD driver. + * + */ +#ifndef MAX3421_HCD_PLAT_H_INCLUDED +#define MAX3421_HCD_PLAT_H_INCLUDED + +/* + * This structure defines the mapping of certain auxiliary functions to the + * MAX3421E GPIO pins. The chip has eight GP inputs and eight GP outputs. + * A value of 0 indicates that the pin is not used/wired to anything. + * + * At this point, the only control the max3421-hcd driver cares about is + * to control Vbus (5V to the peripheral). + */ +struct max3421_hcd_platform_data { + u8 vbus_gpout; /* pin controlling Vbus */ +}; + +#endif /* MAX3421_HCD_PLAT_H_INCLUDED */ -- cgit v1.2.3 From a43ae58c848cfbadaba81c8d63202b4487f922a0 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 6 May 2014 11:29:52 +0800 Subject: PCI: Turn pcibios_penalize_isa_irq() into a weak function pcibios_penalize_isa_irq() is only implemented by x86 now, and legacy ISA is not used by some architectures. Make pcibios_penalize_isa_irq() a __weak function to simplify the code. This removes the need for new platforms to add stub implementations of pcibios_penalize_isa_irq(). [bhelgaas: changelog, comments] Signed-off-by: Hanjun Guo Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann --- arch/alpha/include/asm/pci.h | 5 ----- arch/arm/include/asm/pci.h | 5 ----- arch/blackfin/include/asm/pci.h | 5 ----- arch/cris/include/asm/pci.h | 1 - arch/frv/include/asm/pci.h | 2 -- arch/frv/mb93090-mb00/pci-irq.c | 4 ---- arch/ia64/include/asm/pci.h | 6 ------ arch/microblaze/include/asm/pci.h | 5 ----- arch/mips/include/asm/pci.h | 5 ----- arch/mn10300/include/asm/pci.h | 1 - arch/mn10300/unit-asb2305/pci-irq.c | 4 ---- arch/parisc/include/asm/pci.h | 5 ----- arch/powerpc/include/asm/pci.h | 5 ----- arch/sh/include/asm/pci.h | 5 ----- arch/sparc/include/asm/pci_32.h | 5 ----- arch/sparc/include/asm/pci_64.h | 5 ----- arch/unicore32/include/asm/pci.h | 5 ----- arch/x86/include/asm/pci.h | 1 - arch/xtensa/include/asm/pci.h | 5 ----- drivers/pci/pci.c | 11 +++++++++++ include/linux/pci.h | 1 + 21 files changed, 12 insertions(+), 79 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index d01afb78919c..f7f680f7457d 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -59,11 +59,6 @@ struct pci_controller { extern void pcibios_set_master(struct pci_dev *dev); -extern inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* IOMMU controls. */ /* The PCI address space does not equal the physical memory address space. diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 680a83e94467..7e95d8535e24 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -31,11 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI_DOMAINS */ -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* * The PCI address space does equal the physical memory address space. 
* The networking and block device layers use this boolean for bounce diff --git a/arch/blackfin/include/asm/pci.h b/arch/blackfin/include/asm/pci.h index 74352c4597d9..c737909fba47 100644 --- a/arch/blackfin/include/asm/pci.h +++ b/arch/blackfin/include/asm/pci.h @@ -10,9 +10,4 @@ #define PCIBIOS_MIN_IO 0x00001000 #define PCIBIOS_MIN_MEM 0x10000000 -static inline void pcibios_penalize_isa_irq(int irq) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #endif /* _ASM_BFIN_PCI_H */ diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h index f666734926d5..cc2399c175e9 100644 --- a/arch/cris/include/asm/pci.h +++ b/arch/cris/include/asm/pci.h @@ -20,7 +20,6 @@ void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h index ef03baf5d89d..2035a4d3f9b9 100644 --- a/arch/frv/include/asm/pci.h +++ b/arch/frv/include/asm/pci.h @@ -24,8 +24,6 @@ struct pci_dev; extern void pcibios_set_master(struct pci_dev *dev); -extern void pcibios_penalize_isa_irq(int irq); - #ifdef CONFIG_MMU extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); extern void consistent_free(void *vaddr); diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c index c677b9d81d30..1c35c93f942b 100644 --- a/arch/frv/mb93090-mb00/pci-irq.c +++ b/arch/frv/mb93090-mb00/pci-irq.c @@ -55,10 +55,6 @@ void __init pcibios_fixup_irqs(void) } } -void __init pcibios_penalize_isa_irq(int irq) -{ -} - void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 7d41cc089822..52af5ed9f60b 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -50,12 +50,6 @@ struct pci_dev; extern unsigned long ia64_max_iommu_merge_mask; #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) -static inline void -pcibios_penalize_isa_irq (int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #include #ifdef CONFIG_PCI diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 935f9bec414a..335524040fff 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -44,11 +44,6 @@ struct pci_dev; */ #define pcibios_assign_all_busses() 0 -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #ifdef CONFIG_PCI extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); extern struct dma_map_ops *get_pci_dma_ops(void); diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 12d6842962be..974b0e308963 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -73,11 +73,6 @@ extern unsigned long PCIBIOS_MIN_MEM; extern void pcibios_set_master(struct pci_dev *dev); -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 166323824683..5f70af25c7d0 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ 
-48,7 +48,6 @@ extern void unit_pci_init(void); #define PCIBIOS_MIN_MEM 0xB8000000 void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. diff --git a/arch/mn10300/unit-asb2305/pci-irq.c b/arch/mn10300/unit-asb2305/pci-irq.c index 77439da04671..fcb28ceb824d 100644 --- a/arch/mn10300/unit-asb2305/pci-irq.c +++ b/arch/mn10300/unit-asb2305/pci-irq.c @@ -40,10 +40,6 @@ void __init pcibios_fixup_irqs(void) } } -void __init pcibios_penalize_isa_irq(int irq) -{ -} - void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 465154076d23..20df2b04fc09 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -215,11 +215,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't need to penalize isa irq's */ -} - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 15 : 14; diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 95145a15c708..1b0739bc14b5 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -46,11 +46,6 @@ struct pci_dev; #define pcibios_assign_all_busses() \ (pci_has_flag(PCI_REASSIGN_ALL_BUS)) -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index bff96c2e7d25..5b4511552998 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -70,11 +70,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); extern void pcibios_set_master(struct pci_dev *dev); -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Dynamic DMA mapping stuff. * SuperH has everything mapped statically like x86. */ diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index dc503297481f..53e9b4987db0 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -16,11 +16,6 @@ #define PCI_IRQ_NONE 0xffffffff -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Dynamic DMA mapping stuff. */ #define PCI_DMA_BUS_IS_PHYS (0) diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 1633b718d3bc..c6c7396e7627 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -16,11 +16,6 @@ #define PCI_IRQ_NONE 0xffffffff -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* The PCI address space does not equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. 
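The conversion works because of how the linker resolves weak symbols: the empty pcibios_penalize_isa_irq() stub added to drivers/pci/pci.c at the end of this patch is marked __weak, so it is used only when no architecture supplies its own definition, while x86 (which keeps a normal, non-weak implementation) continues to override it. A minimal sketch of that pattern, with isa_irq_penalty[] as a made-up placeholder for whatever bookkeeping an architecture really does:

	/* generic default, drivers/pci/pci.c -- used when nothing overrides it */
	void __weak pcibios_penalize_isa_irq(int irq, int active) {}

	/* an architecture that still cares supplies a strong definition */
	static int isa_irq_penalty[16];		/* hypothetical */

	void pcibios_penalize_isa_irq(int irq, int active)
	{
		if (active && irq >= 0 && irq < 16)
			isa_irq_penalty[irq] += 100;
	}
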
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index f5e108f4a151..654407e98619 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h @@ -18,11 +18,6 @@ #include #include /* for PCIBIOS_MIN_* */ -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 96ae4f4040bb..0892ea0e683f 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -68,7 +68,6 @@ void pcibios_config_init(void); void pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 614be031a79a..5d52dc43dfe7 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -22,11 +22,6 @@ extern struct pci_controller* pcibios_alloc_controller(void); -static inline void pcibios_penalize_isa_irq(int irq) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Assume some values. (We should revise them, if necessary) */ #define PCIBIOS_MIN_IO 0x2000 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 39012831867e..11f24912523c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1468,6 +1468,17 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device (struct pci_dev *dev) {} +/** + * pcibios_penalize_isa_irq - penalize an ISA IRQ + * @irq: ISA IRQ to penalize + * @active: IRQ active or not + * + * Permits the platform to provide architecture-specific functionality when + * penalizing ISA IRQs. This is the default implementation. Architecture + * implementations can override this. + */ +void __weak pcibios_penalize_isa_irq(int irq, int active) {} + static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; diff --git a/include/linux/pci.h b/include/linux/pci.h index 84182b153b21..018877b8b4e8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1578,6 +1578,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); +void pcibios_penalize_isa_irq(int irq, int active); #ifdef CONFIG_HIBERNATE_CALLBACKS extern struct dev_pm_ops pcibios_pm_ops; -- cgit v1.2.3 From c4128cac3557ddd5fa972cb6511c426cd94a7ccd Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Thu, 15 May 2014 14:28:46 +0200 Subject: usb: gadget: net2280: Add support for PLX USB338X This patch adds support for the PLX USB3380 and USB3382. This driver is based on the driver from the manufacturer. Since USB338X is register compatible with NET2280, I thought that it would be better to include this hardware into net2280 driver. Manufacturer's driver only supported the USB33X, did not follow the Kernel Style and contain some trivial errors. This patch has tried to address this issues. This patch has only been tested on USB338x hardware, but the merge has been done trying to not affect the behaviour of NET2280. 
Signed-off-by: Ricardo Ribalda Delgado Tested-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/Kconfig | 10 +- drivers/usb/gadget/net2280.c | 1115 ++++++++++++++++++++++++++++++++++++++---- drivers/usb/gadget/net2280.h | 97 +++- include/linux/usb/usb338x.h | 199 ++++++++ 4 files changed, 1330 insertions(+), 91 deletions(-) create mode 100644 include/linux/usb/usb338x.h (limited to 'include/linux') diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index ba18e9c110cc..49e434ec527d 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -409,7 +409,7 @@ config USB_NET2272_DMA If unsure, say "N" here. The driver works fine in PIO mode. config USB_NET2280 - tristate "NetChip 228x" + tristate "NetChip 228x / PLX USB338x" depends on PCI help NetChip 2280 / 2282 is a PCI based USB peripheral controller which @@ -419,6 +419,14 @@ config USB_NET2280 (for control transfers) and several endpoints with dedicated functions. + PLX 3380 / 3382 is a PCIe based USB peripheral controller which + supports full, high speed USB 2.0 and super speed USB 3.0 + data transfers. + + It has eight configurable endpoints, as well as endpoint zero + (for control transfers) and several endpoints with dedicated + functions. + Say "y" to link the driver statically, or "m" to build a dynamically linked module called "net2280" and force all gadget drivers to also be dynamically linked. diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 300b3a71383b..87789c9bf7fe 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -18,6 +18,9 @@ * hint to completely eliminate some IRQs, if a later IRQ is guaranteed * and DMA chaining is enabled. * + * MSI is enabled by default. The legacy IRQ is used if MSI couldn't + * be enabled. + * * Note that almost all the errata workarounds here are only needed for * rev1 chips. Rev1a silicon (0110) fixes almost all of them. */ @@ -25,10 +28,14 @@ /* * Copyright (C) 2003 David Brownell * Copyright (C) 2003-2005 PLX Technology, Inc. + * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS * * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility * with 2282 chip * + * Modified Ricardo Ribalda Qtechnology AS to provide compatibility + * with usb 338x chip. 
Based on PLX driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -61,9 +68,8 @@ #include #include - -#define DRIVER_DESC "PLX NET228x USB Peripheral Controller" -#define DRIVER_VERSION "2005 Sept 27" +#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller" +#define DRIVER_VERSION "2005 Sept 27/v3.0" #define EP_DONTUSE 13 /* nonzero */ @@ -73,11 +79,12 @@ static const char driver_name [] = "net2280"; static const char driver_desc [] = DRIVER_DESC; +static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 }; static const char ep0name [] = "ep0"; static const char *const ep_name [] = { ep0name, "ep-a", "ep-b", "ep-c", "ep-d", - "ep-e", "ep-f", + "ep-e", "ep-f", "ep-g", "ep-h", }; /* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO) @@ -90,11 +97,12 @@ static const char *const ep_name [] = { */ static bool use_dma = 1; static bool use_dma_chaining = 0; +static bool use_msi = 1; /* "modprobe net2280 use_dma=n" etc */ module_param (use_dma, bool, S_IRUGO); module_param (use_dma_chaining, bool, S_IRUGO); - +module_param(use_msi, bool, S_IRUGO); /* mode 0 == ep-{a,b,c,d} 1K fifo each * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable @@ -148,6 +156,9 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) struct net2280_ep *ep; u32 max, tmp; unsigned long flags; + static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; + static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, + 0x50, 0x20, 0x70, 0x40, 0x90 }; ep = container_of (_ep, struct net2280_ep, ep); if (!_ep || !desc || ep->desc || _ep->name == ep0name @@ -161,11 +172,20 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) return -EDOM; + if (dev->pdev->vendor == 0x10b5) { + if ((desc->bEndpointAddress & 0x0f) >= 0x0c) + return -EDOM; + ep->is_in = !!usb_endpoint_dir_in(desc); + if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) + return -EINVAL; + } + /* sanity check ep-e/ep-f since their fifos are small */ max = usb_endpoint_maxp (desc) & 0x1fff; - if (ep->num > 4 && max > 64) + if (ep->num > 4 && max > 64 && (dev->pdev->vendor == 0x17cc)) return -ERANGE; + spin_lock_irqsave (&dev->lock, flags); _ep->maxpacket = max & 0x7ff; ep->desc = desc; @@ -176,7 +196,8 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) ep->out_overflow = 0; /* set speed-dependent max packet; may kick in high bandwidth */ - set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max); + set_idx_reg(dev->regs, (dev->enhanced_mode) ? ep_enhanced[ep->num] + : REG_EP_MAXPKT(dev, ep->num), max); /* FIFO lines can't go to different packets. PIO is ok, so * use it instead of troublesome (non-bulk) multi-packet DMA. 
@@ -199,23 +220,43 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) &ep->regs->ep_rsp); } else if (tmp == USB_ENDPOINT_XFER_BULK) { /* catch some particularly blatant driver bugs */ - if ((dev->gadget.speed == USB_SPEED_HIGH - && max != 512) - || (dev->gadget.speed == USB_SPEED_FULL - && max > 64)) { - spin_unlock_irqrestore (&dev->lock, flags); + if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || + (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || + (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { + spin_unlock_irqrestore(&dev->lock, flags); return -ERANGE; } } ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0; - tmp <<= ENDPOINT_TYPE; - tmp |= desc->bEndpointAddress; - tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */ - tmp |= 1 << ENDPOINT_ENABLE; - wmb (); + /* Enable this endpoint */ + if (dev->pdev->vendor == 0x17cc) { + tmp <<= ENDPOINT_TYPE; + tmp |= desc->bEndpointAddress; + /* default full fifo lines */ + tmp |= (4 << ENDPOINT_BYTE_COUNT); + tmp |= 1 << ENDPOINT_ENABLE; + ep->is_in = (tmp & USB_DIR_IN) != 0; + } else { + /* In Legacy mode, only OUT endpoints are used */ + if (dev->enhanced_mode && ep->is_in) { + tmp <<= IN_ENDPOINT_TYPE; + tmp |= (1 << IN_ENDPOINT_ENABLE); + /* Not applicable to Legacy */ + tmp |= (1 << ENDPOINT_DIRECTION); + } else { + tmp <<= OUT_ENDPOINT_TYPE; + tmp |= (1 << OUT_ENDPOINT_ENABLE); + tmp |= (ep->is_in << ENDPOINT_DIRECTION); + } + + tmp |= usb_endpoint_num(desc); + tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); + } + + /* Make sure all the registers are written before ep_rsp*/ + wmb(); /* for OUT transfers, block the rx fifo until a read is posted */ - ep->is_in = (tmp & USB_DIR_IN) != 0; if (!ep->is_in) writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); else if (dev->pdev->device != 0x2280) { @@ -226,11 +267,13 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); } - writel (tmp, &ep->regs->ep_cfg); + writel(tmp, &ep->cfg->ep_cfg); /* enable irqs */ if (!ep->dma) { /* pio, per-packet */ - tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); + tmp = (dev->pdev->vendor == 0x17cc)?(1 << ep->num) + : (1 << ep_bit[ep->num]); + tmp |= readl(&dev->regs->pciirqenb0); writel (tmp, &dev->regs->pciirqenb0); tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) @@ -251,8 +294,10 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); writel (tmp, &ep->regs->ep_irqenb); - tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); - writel (tmp, &dev->regs->pciirqenb0); + tmp = (dev->pdev->vendor == 0x17cc)?(1 << ep->num) + : (1 << ep_bit[ep->num]); + tmp |= readl(&dev->regs->pciirqenb0); + writel(tmp, &dev->regs->pciirqenb0); } } @@ -286,7 +331,8 @@ static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec) static const struct usb_ep_ops net2280_ep_ops; -static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep) +static void ep_reset_228x(struct net2280_regs __iomem *regs, + struct net2280_ep *ep) { u32 tmp; @@ -361,6 +407,55 @@ static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep) /* fifo size is handled separately */ } +static void ep_reset_338x(struct net2280_regs __iomem *regs, + struct net2280_ep *ep) +{ + u32 tmp, dmastat; + + ep->desc = NULL; + INIT_LIST_HEAD(&ep->queue); + + usb_ep_set_maxpacket_limit(&ep->ep, ~0); + ep->ep.ops = &net2280_ep_ops; + + /* 
disable the dma, irqs, endpoint... */ + if (ep->dma) { + writel(0, &ep->dma->dmactl); + writel((1 << DMA_ABORT_DONE_INTERRUPT) | + (1 << DMA_PAUSE_DONE_INTERRUPT) | + (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT) | + (1 << DMA_TRANSACTION_DONE_INTERRUPT) + /* | (1 << DMA_ABORT) */ + , &ep->dma->dmastat); + + dmastat = readl(&ep->dma->dmastat); + if (dmastat == 0x5002) { + WARNING(ep->dev, "The dmastat return = %x!!\n", + dmastat); + writel(0x5a, &ep->dma->dmastat); + } + + tmp = readl(®s->pciirqenb0); + tmp &= ~(1 << ep_bit[ep->num]); + writel(tmp, ®s->pciirqenb0); + } else { + if (ep->num < 5) { + tmp = readl(®s->pciirqenb1); + tmp &= ~(1 << (8 + ep->num)); /* completion */ + writel(tmp, ®s->pciirqenb1); + } + } + writel(0, &ep->regs->ep_irqenb); + + writel((1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | + (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | + (1 << FIFO_OVERFLOW) | + (1 << DATA_PACKET_RECEIVED_INTERRUPT) | + (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | + (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | + (1 << DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); +} + static void nuke (struct net2280_ep *); static int net2280_disable (struct usb_ep *_ep) @@ -374,13 +469,17 @@ static int net2280_disable (struct usb_ep *_ep) spin_lock_irqsave (&ep->dev->lock, flags); nuke (ep); - ep_reset (ep->dev->regs, ep); + + if (ep->dev->pdev->vendor == 0x10b5) + ep_reset_338x(ep->dev->regs, ep); + else + ep_reset_228x(ep->dev->regs, ep); VDEBUG (ep->dev, "disabled %s %s\n", ep->dma ? "dma" : "pio", _ep->name); /* synch memory views with the device */ - (void) readl (&ep->regs->ep_cfg); + (void)readl(&ep->cfg->ep_cfg); if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4) ep->dma = &ep->dev->dma [ep->num - 1]; @@ -698,6 +797,8 @@ static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma) writel (readl (&dma->dmastat), &dma->dmastat); writel (td_dma, &dma->dmadesc); + if (ep->dev->pdev->vendor == 0x10b5) + dmactl |= (0x01 << DMA_REQUEST_OUTSTANDING); writel (dmactl, &dma->dmactl); /* erratum 0116 workaround part 3: pci arbiter away from net2280 */ @@ -772,6 +873,21 @@ static void start_dma (struct net2280_ep *ep, struct net2280_request *req) start_queue (ep, tmp, req->td_dma); } +static inline void resume_dma(struct net2280_ep *ep) +{ + writel(readl(&ep->dma->dmactl) | (1 << DMA_ENABLE), &ep->dma->dmactl); + + ep->dma_started = true; +} + +static inline void ep_stop_dma(struct net2280_ep *ep) +{ + writel(readl(&ep->dma->dmactl) & ~(1 << DMA_ENABLE), &ep->dma->dmactl); + spin_stop_dma(ep->dma); + + ep->dma_started = false; +} + static inline void queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid) { @@ -874,8 +990,23 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) /* kickstart this i/o queue? 
*/ if (list_empty (&ep->queue) && !ep->stopped) { + /* DMA request while EP halted */ + if (ep->dma && + (readl(&ep->regs->ep_rsp) & (1 << CLEAR_ENDPOINT_HALT)) && + (dev->pdev->vendor == 0x10b5)) { + int valid = 1; + if (ep->is_in) { + int expect; + expect = likely(req->req.zero || + ((req->req.length % + ep->ep.maxpacket) != 0)); + if (expect != ep->in_fifo_validate) + valid = 0; + } + queue_dma(ep, req, valid); + } /* use DMA if the endpoint supports it, else pio */ - if (ep->dma) + else if (ep->dma) start_dma (ep, req); else { /* maybe there's no control data, just status ack */ @@ -993,6 +1124,8 @@ static void scan_dma_completions (struct net2280_ep *ep) } else if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) { tmp = readl (&ep->regs->ep_stat); + if (ep->dev->pdev->vendor == 0x10b5) + return dma_done(ep, req, tmp, 0); /* AVOID TROUBLE HERE by not issuing short reads from * your gadget driver. That helps avoids errata 0121, @@ -1079,7 +1212,7 @@ static void restart_dma (struct net2280_ep *ep) start_queue (ep, dmactl, req->td_dma); } -static void abort_dma (struct net2280_ep *ep) +static void abort_dma_228x(struct net2280_ep *ep) { /* abort the current transfer */ if (likely (!list_empty (&ep->queue))) { @@ -1091,6 +1224,19 @@ static void abort_dma (struct net2280_ep *ep) scan_dma_completions (ep); } +static void abort_dma_338x(struct net2280_ep *ep) +{ + writel((1 << DMA_ABORT), &ep->dma->dmastat); + spin_stop_dma(ep->dma); +} + +static void abort_dma(struct net2280_ep *ep) +{ + if (ep->dev->pdev->vendor == 0x17cc) + return abort_dma_228x(ep); + return abort_dma_338x(ep); +} + /* dequeue ALL requests */ static void nuke (struct net2280_ep *ep) { @@ -1244,6 +1390,9 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) ep->wedged = 1; } else { clear_halt (ep); + if (ep->dev->pdev->vendor == 0x10b5 && + !list_empty(&ep->queue) && ep->td_dma) + restart_dma(ep); ep->wedged = 0; } (void) readl (&ep->regs->ep_rsp); @@ -1367,10 +1516,13 @@ static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value) spin_lock_irqsave (&dev->lock, flags); tmp = readl (&dev->usb->usbctl); - if (value) + if (value) { tmp |= (1 << SELF_POWERED_STATUS); - else + dev->selfpowered = 1; + } else { tmp &= ~(1 << SELF_POWERED_STATUS); + dev->selfpowered = 0; + } writel (tmp, &dev->usb->usbctl); spin_unlock_irqrestore (&dev->lock, flags); @@ -1504,14 +1656,14 @@ static ssize_t registers_show(struct device *_dev, /* DMA Control Registers */ /* Configurable EP Control Registers */ - for (i = 0; i < 7; i++) { + for (i = 0; i < dev->n_ep; i++) { struct net2280_ep *ep; ep = &dev->ep [i]; if (i && !ep->desc) continue; - t1 = readl (&ep->regs->ep_cfg); + t1 = readl(&ep->cfg->ep_cfg); t2 = readl (&ep->regs->ep_rsp) & 0xff; t = scnprintf (next, size, "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s" @@ -1571,7 +1723,7 @@ static ssize_t registers_show(struct device *_dev, t = scnprintf (next, size, "\nirqs: "); size -= t; next += t; - for (i = 0; i < 7; i++) { + for (i = 0; i < dev->n_ep; i++) { struct net2280_ep *ep; ep = &dev->ep [i]; @@ -1606,7 +1758,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, size = PAGE_SIZE; spin_lock_irqsave (&dev->lock, flags); - for (i = 0; i < 7; i++) { + for (i = 0; i < dev->n_ep; i++) { struct net2280_ep *ep = &dev->ep [i]; struct net2280_request *req; int t; @@ -1735,6 +1887,121 @@ static void set_fifo_mode (struct net2280 *dev, int mode) list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list); } +static void 
defect7374_disable_data_eps(struct net2280 *dev) +{ + /* + * For Defect 7374, disable data EPs (and more): + * - This phase undoes the earlier phase of the Defect 7374 workaround, + * returing ep regs back to normal. + */ + struct net2280_ep *ep; + int i; + unsigned char ep_sel; + u32 tmp_reg; + + for (i = 1; i < 5; i++) { + ep = &dev->ep[i]; + writel(0, &ep->cfg->ep_cfg); + } + + /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */ + for (i = 0; i < 6; i++) + writel(0, &dev->dep[i].dep_cfg); + + for (ep_sel = 0; ep_sel <= 21; ep_sel++) { + /* Select an endpoint for subsequent operations: */ + tmp_reg = readl(&dev->plregs->pl_ep_ctrl); + writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl); + + if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) || + ep_sel == 18 || ep_sel == 20) + continue; + + /* Change settings on some selected endpoints */ + tmp_reg = readl(&dev->plregs->pl_ep_cfg_4); + tmp_reg &= ~(1 << NON_CTRL_IN_TOLERATE_BAD_DIR); + writel(tmp_reg, &dev->plregs->pl_ep_cfg_4); + tmp_reg = readl(&dev->plregs->pl_ep_ctrl); + tmp_reg |= (1 << EP_INITIALIZED); + writel(tmp_reg, &dev->plregs->pl_ep_ctrl); + } +} + +static void defect7374_enable_data_eps_zero(struct net2280 *dev) +{ + u32 tmp = 0, tmp_reg; + u32 fsmvalue, scratch; + int i; + unsigned char ep_sel; + + scratch = get_idx_reg(dev->regs, SCRATCH); + fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); + scratch &= ~(0xf << DEFECT7374_FSM_FIELD); + + /*See if firmware needs to set up for workaround*/ + if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { + WARNING(dev, "Operate Defect 7374 workaround soft this time"); + WARNING(dev, "It will operate on cold-reboot and SS connect"); + + /*GPEPs:*/ + tmp = ((0 << ENDPOINT_NUMBER) | (1 << ENDPOINT_DIRECTION) | + (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) | + ((dev->enhanced_mode) ? + 1 << OUT_ENDPOINT_ENABLE : 1 << ENDPOINT_ENABLE) | + (1 << IN_ENDPOINT_ENABLE)); + + for (i = 1; i < 5; i++) + writel(tmp, &dev->ep[i].cfg->ep_cfg); + + /* CSRIN, PCIIN, STATIN, RCIN*/ + tmp = ((0 << ENDPOINT_NUMBER) | (1 << ENDPOINT_ENABLE)); + writel(tmp, &dev->dep[1].dep_cfg); + writel(tmp, &dev->dep[3].dep_cfg); + writel(tmp, &dev->dep[4].dep_cfg); + writel(tmp, &dev->dep[5].dep_cfg); + + /*Implemented for development and debug. 
+ * Can be refined/tuned later.*/ + for (ep_sel = 0; ep_sel <= 21; ep_sel++) { + /* Select an endpoint for subsequent operations: */ + tmp_reg = readl(&dev->plregs->pl_ep_ctrl); + writel(((tmp_reg & ~0x1f) | ep_sel), + &dev->plregs->pl_ep_ctrl); + + if (ep_sel == 1) { + tmp = + (readl(&dev->plregs->pl_ep_ctrl) | + (1 << CLEAR_ACK_ERROR_CODE) | 0); + writel(tmp, &dev->plregs->pl_ep_ctrl); + continue; + } + + if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) || + ep_sel == 18 || ep_sel == 20) + continue; + + tmp = (readl(&dev->plregs->pl_ep_cfg_4) | + (1 << NON_CTRL_IN_TOLERATE_BAD_DIR) | 0); + writel(tmp, &dev->plregs->pl_ep_cfg_4); + + tmp = readl(&dev->plregs->pl_ep_ctrl) & + ~(1 << EP_INITIALIZED); + writel(tmp, &dev->plregs->pl_ep_ctrl); + + } + + /* Set FSM to focus on the first Control Read: + * - Tip: Connection speed is known upon the first + * setup request.*/ + scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ; + set_idx_reg(dev->regs, SCRATCH, scratch); + + } else{ + WARNING(dev, "Defect 7374 workaround soft will NOT operate"); + WARNING(dev, "It will operate on cold-reboot and SS connect"); + } +} + /* keeping it simple: * - one bus driver, initted first; * - one function driver, initted second @@ -1744,7 +2011,7 @@ static void set_fifo_mode (struct net2280 *dev, int mode) * perhaps to bind specific drivers to specific devices. */ -static void usb_reset (struct net2280 *dev) +static void usb_reset_228x(struct net2280 *dev) { u32 tmp; @@ -1760,11 +2027,11 @@ static void usb_reset (struct net2280 *dev) /* clear old dma and irq state */ for (tmp = 0; tmp < 4; tmp++) { - struct net2280_ep *ep = &dev->ep [tmp + 1]; - + struct net2280_ep *ep = &dev->ep[tmp + 1]; if (ep->dma) - abort_dma (ep); + abort_dma(ep); } + writel (~0, &dev->regs->irqstat0), writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), @@ -1780,7 +2047,67 @@ static void usb_reset (struct net2280 *dev) set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0); } -static void usb_reinit (struct net2280 *dev) +static void usb_reset_338x(struct net2280 *dev) +{ + u32 tmp; + u32 fsmvalue; + + dev->gadget.speed = USB_SPEED_UNKNOWN; + (void)readl(&dev->usb->usbctl); + + net2280_led_init(dev); + + fsmvalue = get_idx_reg(dev->regs, SCRATCH) & + (0xf << DEFECT7374_FSM_FIELD); + + /* See if firmware needs to set up for workaround: */ + if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { + INFO(dev, "%s: Defect 7374 FsmValue 0x%08X\n", __func__, + fsmvalue); + } else { + /* disable automatic responses, and irqs */ + writel(0, &dev->usb->stdrsp); + writel(0, &dev->regs->pciirqenb0); + writel(0, &dev->regs->pciirqenb1); + } + + /* clear old dma and irq state */ + for (tmp = 0; tmp < 4; tmp++) { + struct net2280_ep *ep = &dev->ep[tmp + 1]; + + if (ep->dma) + abort_dma(ep); + } + + writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); + + if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { + /* reset, and enable pci */ + tmp = readl(&dev->regs->devinit) | + (1 << PCI_ENABLE) | + (1 << FIFO_SOFT_RESET) | + (1 << USB_SOFT_RESET) | + (1 << M8051_RESET); + + writel(tmp, &dev->regs->devinit); + } + + /* always ep-{1,2,3,4} ... 
maybe not ep-3 or ep-4 */ + INIT_LIST_HEAD(&dev->gadget.ep_list); + + for (tmp = 1; tmp < dev->n_ep; tmp++) + list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); + +} + +static void usb_reset(struct net2280 *dev) +{ + if (dev->pdev->vendor == 0x17cc) + return usb_reset_228x(dev); + return usb_reset_338x(dev); +} + +static void usb_reinit_228x(struct net2280 *dev) { u32 tmp; int init_dma; @@ -1803,7 +2130,8 @@ static void usb_reinit (struct net2280 *dev) } else ep->fifo_size = 64; ep->regs = &dev->epregs [tmp]; - ep_reset (dev->regs, ep); + ep->cfg = &dev->epregs[tmp]; + ep_reset_228x(dev->regs, ep); } usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64); usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64); @@ -1820,7 +2148,122 @@ static void usb_reinit (struct net2280 *dev) writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg); } -static void ep0_start (struct net2280 *dev) +static void usb_reinit_338x(struct net2280 *dev) +{ + int init_dma; + int i; + u32 tmp, val; + u32 fsmvalue; + static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; + static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, + 0x00, 0xC0, 0x00, 0xC0 }; + + /* use_dma changes are ignored till next device re-init */ + init_dma = use_dma; + + /* basic endpoint init */ + for (i = 0; i < dev->n_ep; i++) { + struct net2280_ep *ep = &dev->ep[i]; + + ep->ep.name = ep_name[i]; + ep->dev = dev; + ep->num = i; + + if (i > 0 && i <= 4 && init_dma) + ep->dma = &dev->dma[i - 1]; + + if (dev->enhanced_mode) { + ep->cfg = &dev->epregs[ne[i]]; + ep->regs = (struct net2280_ep_regs __iomem *) + (((void *)&dev->epregs[ne[i]]) + + ep_reg_addr[i]); + ep->fiforegs = &dev->fiforegs[i]; + } else { + ep->cfg = &dev->epregs[i]; + ep->regs = &dev->epregs[i]; + ep->fiforegs = &dev->fiforegs[i]; + } + + ep->fifo_size = (i != 0) ? 2048 : 512; + + ep_reset_338x(dev->regs, ep); + } + usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); + + dev->gadget.ep0 = &dev->ep[0].ep; + dev->ep[0].stopped = 0; + + /* Link layer set up */ + fsmvalue = get_idx_reg(dev->regs, SCRATCH) & + (0xf << DEFECT7374_FSM_FIELD); + + /* See if driver needs to set up for workaround: */ + if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) + INFO(dev, "%s: Defect 7374 FsmValue %08x\n", + __func__, fsmvalue); + else { + tmp = readl(&dev->usb_ext->usbctl2) & + ~((1 << U1_ENABLE) | (1 << U2_ENABLE) | (1 << LTM_ENABLE)); + writel(tmp, &dev->usb_ext->usbctl2); + } + + /* Hardware Defect and Workaround */ + val = readl(&dev->ll_lfps_regs->ll_lfps_5); + val &= ~(0xf << TIMER_LFPS_6US); + val |= 0x5 << TIMER_LFPS_6US; + writel(val, &dev->ll_lfps_regs->ll_lfps_5); + + val = readl(&dev->ll_lfps_regs->ll_lfps_6); + val &= ~(0xffff << TIMER_LFPS_80US); + val |= 0x0100 << TIMER_LFPS_80US; + writel(val, &dev->ll_lfps_regs->ll_lfps_6); + + /* + * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB + * Hot Reset Exit Handshake may Fail in Specific Case using + * Default Register Settings. Workaround for Enumeration test. + */ + val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); + val &= ~(0x1f << HOT_TX_NORESET_TS2); + val |= 0x10 << HOT_TX_NORESET_TS2; + writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); + + val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); + val &= ~(0x1f << HOT_RX_RESET_TS2); + val |= 0x3 << HOT_RX_RESET_TS2; + writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); + + /* + * Set Recovery Idle to Recover bit: + * - On SS connections, setting Recovery Idle to Recover Fmw improves + * link robustness with various hosts and hubs. 
+ * - It is safe to set for all connection speeds; all chip revisions. + * - R-M-W to leave other bits undisturbed. + * - Reference PLX TT-7372 + */ + val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); + val |= (1 << RECOVERY_IDLE_TO_RECOVER_FMW); + writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); + + INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); + + /* disable dedicated endpoints */ + writel(0x0D, &dev->dep[0].dep_cfg); + writel(0x0D, &dev->dep[1].dep_cfg); + writel(0x0E, &dev->dep[2].dep_cfg); + writel(0x0E, &dev->dep[3].dep_cfg); + writel(0x0F, &dev->dep[4].dep_cfg); + writel(0x0C, &dev->dep[5].dep_cfg); +} + +static void usb_reinit(struct net2280 *dev) +{ + if (dev->pdev->vendor == 0x17cc) + return usb_reinit_228x(dev); + return usb_reinit_338x(dev); +} + +static void ep0_start_228x(struct net2280 *dev) { writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE) | (1 << CLEAR_NAK_OUT_PACKETS) @@ -1863,6 +2306,61 @@ static void ep0_start (struct net2280 *dev) (void) readl (&dev->usb->usbctl); } +static void ep0_start_338x(struct net2280 *dev) +{ + u32 fsmvalue; + + fsmvalue = get_idx_reg(dev->regs, SCRATCH) & + (0xf << DEFECT7374_FSM_FIELD); + + if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) + INFO(dev, "%s: Defect 7374 FsmValue %08x\n", __func__, + fsmvalue); + else + writel((1 << CLEAR_NAK_OUT_PACKETS_MODE) | + (1 << SET_EP_HIDE_STATUS_PHASE), + &dev->epregs[0].ep_rsp); + + /* + * hardware optionally handles a bunch of standard requests + * that the API hides from drivers anyway. have it do so. + * endpoint status/features are handled in software, to + * help pass tests for some dubious behavior. + */ + writel((1 << SET_ISOCHRONOUS_DELAY) | + (1 << SET_SEL) | + (1 << SET_TEST_MODE) | + (1 << SET_ADDRESS) | + (1 << GET_INTERFACE_STATUS) | + (1 << GET_DEVICE_STATUS), + &dev->usb->stdrsp); + dev->wakeup_enable = 1; + writel((1 << USB_ROOT_PORT_WAKEUP_ENABLE) | + (dev->softconnect << USB_DETECT_ENABLE) | + (1 << DEVICE_REMOTE_WAKEUP_ENABLE), + &dev->usb->usbctl); + + /* enable irqs so we can see ep0 and general operation */ + writel((1 << SETUP_PACKET_INTERRUPT_ENABLE) | + (1 << ENDPOINT_0_INTERRUPT_ENABLE) + , &dev->regs->pciirqenb0); + writel((1 << PCI_INTERRUPT_ENABLE) | + (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | + (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | + (1 << VBUS_INTERRUPT_ENABLE), + &dev->regs->pciirqenb1); + + /* don't leave any writes posted */ + (void)readl(&dev->usb->usbctl); +} + +static void ep0_start(struct net2280 *dev) +{ + if (dev->pdev->vendor == 0x17cc) + return ep0_start_228x(dev); + return ep0_start_338x(dev); +} + /* when a driver is successfully registered, it will receive * control requests including set_configuration(), which enables * non-control requests. then usb traffic follows until a @@ -1886,7 +2384,7 @@ static int net2280_start(struct usb_gadget *_gadget, dev = container_of (_gadget, struct net2280, gadget); - for (i = 0; i < 7; i++) + for (i = 0; i < dev->n_ep; i++) dev->ep [i].irqs = 0; /* hook up the driver ... */ @@ -1900,13 +2398,17 @@ static int net2280_start(struct usb_gadget *_gadget, if (retval) goto err_func; /* Enable force-full-speed testing mode, if desired */ - if (full_speed) + if (full_speed && dev->pdev->vendor == 0x17cc) writel(1 << FORCE_FULL_SPEED_MODE, &dev->usb->xcvrdiag); /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. 
*/ net2280_led_active (dev, 1); + + if (dev->pdev->vendor == 0x10b5) + defect7374_enable_data_eps_zero(dev); + ep0_start (dev); DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n", @@ -1937,7 +2439,7 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) * and kill any outstanding requests. */ usb_reset (dev); - for (i = 0; i < 7; i++) + for (i = 0; i < dev->n_ep; i++) nuke (&dev->ep [i]); /* report disconnect; the driver is already quiesced */ @@ -1967,7 +2469,8 @@ static int net2280_stop(struct usb_gadget *_gadget, net2280_led_active (dev, 0); /* Disable full-speed test mode */ - writel(0, &dev->usb->xcvrdiag); + if (dev->pdev->vendor == 0x17cc) + writel(0, &dev->usb->xcvrdiag); device_remove_file (&dev->pdev->dev, &dev_attr_function); device_remove_file (&dev->pdev->dev, &dev_attr_queues); @@ -2219,6 +2722,350 @@ get_ep_by_addr (struct net2280 *dev, u16 wIndex) return NULL; } +static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r) +{ + u32 scratch, fsmvalue; + u32 ack_wait_timeout, state; + + /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */ + scratch = get_idx_reg(dev->regs, SCRATCH); + fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); + scratch &= ~(0xf << DEFECT7374_FSM_FIELD); + + if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) && + (r.bRequestType & USB_DIR_IN))) + return; + + /* This is the first Control Read for this connection: */ + if (!(readl(&dev->usb->usbstat) & (1 << SUPER_SPEED_MODE))) { + /* + * Connection is NOT SS: + * - Connection must be FS or HS. + * - This FSM state should allow workaround software to + * run after the next USB connection. + */ + scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ; + goto restore_data_eps; + } + + /* Connection is SS: */ + for (ack_wait_timeout = 0; + ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS; + ack_wait_timeout++) { + + state = readl(&dev->plregs->pl_ep_status_1) + & (0xff << STATE); + if ((state >= (ACK_GOOD_NORMAL << STATE)) && + (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) { + scratch |= DEFECT7374_FSM_SS_CONTROL_READ; + break; + } + + /* + * We have not yet received host's Data Phase ACK + * - Wait and try again. + */ + udelay(DEFECT_7374_PROCESSOR_WAIT_TIME); + + continue; + } + + + if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) { + ERROR(dev, "FAIL: Defect 7374 workaround waited but failed"); + ERROR(dev, "to detect SS host's data phase ACK."); + ERROR(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"); + ERROR(dev, "got 0x%2.2x.\n", state >> STATE); + } else { + WARNING(dev, "INFO: Defect 7374 workaround waited about\n"); + WARNING(dev, "%duSec for Control Read Data Phase ACK\n", + DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout); + } + +restore_data_eps: + /* + * Restore data EPs to their pre-workaround settings (disabled, + * initialized, and other details). + */ + defect7374_disable_data_eps(dev); + + set_idx_reg(dev->regs, SCRATCH, scratch); + + return; +} + +static void ep_stall(struct net2280_ep *ep, int stall) +{ + struct net2280 *dev = ep->dev; + u32 val; + static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 }; + + if (stall) { + writel((1 << SET_ENDPOINT_HALT) | + /* (1 << SET_NAK_PACKETS) | */ + (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), + &ep->regs->ep_rsp); + ep->is_halt = 1; + } else { + if (dev->gadget.speed == USB_SPEED_SUPER) { + /* + * Workaround for SS SeqNum not cleared via + * Endpoint Halt (Clear) bit. 
select endpoint + */ + val = readl(&dev->plregs->pl_ep_ctrl); + val = (val & ~0x1f) | ep_pl[ep->num]; + writel(val, &dev->plregs->pl_ep_ctrl); + + val |= (1 << SEQUENCE_NUMBER_RESET); + writel(val, &dev->plregs->pl_ep_ctrl); + } + val = readl(&ep->regs->ep_rsp); + val |= (1 << CLEAR_ENDPOINT_HALT) | + (1 << CLEAR_ENDPOINT_TOGGLE); + writel(val + /* | (1 << CLEAR_NAK_PACKETS)*/ + , &ep->regs->ep_rsp); + ep->is_halt = 0; + val = readl(&ep->regs->ep_rsp); + } +} + +static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged) +{ + /* set/clear, then synch memory views with the device */ + if (value) { + ep->stopped = 1; + if (ep->num == 0) + ep->dev->protocol_stall = 1; + else { + if (ep->dma) + ep_stop_dma(ep); + ep_stall(ep, true); + } + + if (wedged) + ep->wedged = 1; + } else { + ep->stopped = 0; + ep->wedged = 0; + + ep_stall(ep, false); + + /* Flush the queue */ + if (!list_empty(&ep->queue)) { + struct net2280_request *req = + list_entry(ep->queue.next, struct net2280_request, + queue); + if (ep->dma) + resume_dma(ep); + else { + if (ep->is_in) + write_fifo(ep, &req->req); + else { + if (read_fifo(ep, req)) + done(ep, req, 0); + } + } + } + } +} + +static void handle_stat0_irqs_superspeed(struct net2280 *dev, + struct net2280_ep *ep, struct usb_ctrlrequest r) +{ + int tmp = 0; + +#define w_value le16_to_cpu(r.wValue) +#define w_index le16_to_cpu(r.wIndex) +#define w_length le16_to_cpu(r.wLength) + + switch (r.bRequest) { + struct net2280_ep *e; + u16 status; + + case USB_REQ_SET_CONFIGURATION: + dev->addressed_state = !w_value; + goto usb3_delegate; + + case USB_REQ_GET_STATUS: + switch (r.bRequestType) { + case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE): + status = dev->wakeup_enable ? 0x02 : 0x00; + if (dev->selfpowered) + status |= 1 << 0; + status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | + dev->ltm_enable << 4); + writel(0, &dev->epregs[0].ep_irqenb); + set_fifo_bytecount(ep, sizeof(status)); + writel((__force u32) status, &dev->epregs[0].ep_data); + allow_status_338x(ep); + break; + + case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): + e = get_ep_by_addr(dev, w_index); + if (!e) + goto do_stall3; + status = readl(&e->regs->ep_rsp) & + (1 << CLEAR_ENDPOINT_HALT); + writel(0, &dev->epregs[0].ep_irqenb); + set_fifo_bytecount(ep, sizeof(status)); + writel((__force u32) status, &dev->epregs[0].ep_data); + allow_status_338x(ep); + break; + + default: + goto usb3_delegate; + } + break; + + case USB_REQ_CLEAR_FEATURE: + switch (r.bRequestType) { + case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): + if (!dev->addressed_state) { + switch (w_value) { + case USB_DEVICE_U1_ENABLE: + dev->u1_enable = 0; + writel(readl(&dev->usb_ext->usbctl2) & + ~(1 << U1_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + + case USB_DEVICE_U2_ENABLE: + dev->u2_enable = 0; + writel(readl(&dev->usb_ext->usbctl2) & + ~(1 << U2_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + + case USB_DEVICE_LTM_ENABLE: + dev->ltm_enable = 0; + writel(readl(&dev->usb_ext->usbctl2) & + ~(1 << LTM_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + + default: + break; + } + } + if (w_value == USB_DEVICE_REMOTE_WAKEUP) { + dev->wakeup_enable = 0; + writel(readl(&dev->usb->usbctl) & + ~(1 << DEVICE_REMOTE_WAKEUP_ENABLE), + &dev->usb->usbctl); + allow_status_338x(ep); + break; + } + goto usb3_delegate; + + case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): + e = 
get_ep_by_addr(dev, w_index); + if (!e) + goto do_stall3; + if (w_value != USB_ENDPOINT_HALT) + goto do_stall3; + VDEBUG(dev, "%s clear halt\n", e->ep.name); + ep_stall(e, false); + if (!list_empty(&e->queue) && e->td_dma) + restart_dma(e); + allow_status(ep); + ep->stopped = 1; + break; + + default: + goto usb3_delegate; + } + break; + case USB_REQ_SET_FEATURE: + switch (r.bRequestType) { + case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): + if (!dev->addressed_state) { + switch (w_value) { + case USB_DEVICE_U1_ENABLE: + dev->u1_enable = 1; + writel(readl(&dev->usb_ext->usbctl2) | + (1 << U1_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + + case USB_DEVICE_U2_ENABLE: + dev->u2_enable = 1; + writel(readl(&dev->usb_ext->usbctl2) | + (1 << U2_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + + case USB_DEVICE_LTM_ENABLE: + dev->ltm_enable = 1; + writel(readl(&dev->usb_ext->usbctl2) | + (1 << LTM_ENABLE), + &dev->usb_ext->usbctl2); + allow_status_338x(ep); + goto next_endpoints3; + default: + break; + } + } + + if (w_value == USB_DEVICE_REMOTE_WAKEUP) { + dev->wakeup_enable = 1; + writel(readl(&dev->usb->usbctl) | + (1 << DEVICE_REMOTE_WAKEUP_ENABLE), + &dev->usb->usbctl); + allow_status_338x(ep); + break; + } + goto usb3_delegate; + + case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): + e = get_ep_by_addr(dev, w_index); + if (!e || (w_value != USB_ENDPOINT_HALT)) + goto do_stall3; + ep_stdrsp(e, true, false); + allow_status_338x(ep); + break; + + default: + goto usb3_delegate; + } + + break; + default: + +usb3_delegate: + VDEBUG(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n", + r.bRequestType, r.bRequest, + w_value, w_index, w_length, + readl(&ep->cfg->ep_cfg)); + + ep->responded = 0; + spin_unlock(&dev->lock); + tmp = dev->driver->setup(&dev->gadget, &r); + spin_lock(&dev->lock); + } +do_stall3: + if (tmp < 0) { + VDEBUG(dev, "req %02x.%02x protocol STALL; stat %d\n", + r.bRequestType, r.bRequest, tmp); + dev->protocol_stall = 1; + /* TD 9.9 Halt Endpoint test. 
TD 9.22 Set feature test */ + ep_stall(ep, true); + } + +next_endpoints3: + +#undef w_value +#undef w_index +#undef w_length + + return; +} + static void handle_stat0_irqs (struct net2280 *dev, u32 stat) { struct net2280_ep *ep; @@ -2240,10 +3087,20 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) struct net2280_request *req; if (dev->gadget.speed == USB_SPEED_UNKNOWN) { - if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED)) + u32 val = readl(&dev->usb->usbstat); + if (val & (1 << SUPER_SPEED)) { + dev->gadget.speed = USB_SPEED_SUPER; + usb_ep_set_maxpacket_limit(&dev->ep[0].ep, + EP0_SS_MAX_PACKET_SIZE); + } else if (val & (1 << HIGH_SPEED)) { dev->gadget.speed = USB_SPEED_HIGH; - else + usb_ep_set_maxpacket_limit(&dev->ep[0].ep, + EP0_HS_MAX_PACKET_SIZE); + } else { dev->gadget.speed = USB_SPEED_FULL; + usb_ep_set_maxpacket_limit(&dev->ep[0].ep, + EP0_HS_MAX_PACKET_SIZE); + } net2280_led_speed (dev, dev->gadget.speed); DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed)); } @@ -2261,32 +3118,38 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) } ep->stopped = 0; dev->protocol_stall = 0; - - if (ep->dev->pdev->device == 0x2280) - tmp = (1 << FIFO_OVERFLOW) - | (1 << FIFO_UNDERFLOW); - else - tmp = 0; - - writel (tmp | (1 << TIMEOUT) - | (1 << USB_STALL_SENT) - | (1 << USB_IN_NAK_SENT) - | (1 << USB_IN_ACK_RCVD) - | (1 << USB_OUT_PING_NAK_SENT) - | (1 << USB_OUT_ACK_SENT) - | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) - | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) - | (1 << DATA_PACKET_RECEIVED_INTERRUPT) - | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) - | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) - | (1 << DATA_IN_TOKEN_INTERRUPT) - , &ep->regs->ep_stat); - u.raw [0] = readl (&dev->usb->setup0123); - u.raw [1] = readl (&dev->usb->setup4567); + if (dev->pdev->vendor == 0x10b5) + ep->is_halt = 0; + else{ + if (ep->dev->pdev->device == 0x2280) + tmp = (1 << FIFO_OVERFLOW) | + (1 << FIFO_UNDERFLOW); + else + tmp = 0; + + writel(tmp | (1 << TIMEOUT) | + (1 << USB_STALL_SENT) | + (1 << USB_IN_NAK_SENT) | + (1 << USB_IN_ACK_RCVD) | + (1 << USB_OUT_PING_NAK_SENT) | + (1 << USB_OUT_ACK_SENT) | + (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | + (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | + (1 << DATA_PACKET_RECEIVED_INTERRUPT) | + (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | + (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | + (1 << DATA_IN_TOKEN_INTERRUPT) + , &ep->regs->ep_stat); + } + u.raw[0] = readl(&dev->usb->setup0123); + u.raw[1] = readl(&dev->usb->setup4567); cpu_to_le32s (&u.raw [0]); cpu_to_le32s (&u.raw [1]); + if (dev->pdev->vendor == 0x10b5) + defect7374_workaround(dev, u.r); + tmp = 0; #define w_value le16_to_cpu(u.r.wValue) @@ -2318,6 +3181,12 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) * everything else goes uplevel to the gadget code. 
*/ ep->responded = 1; + + if (dev->gadget.speed == USB_SPEED_SUPER) { + handle_stat0_irqs_superspeed(dev, ep, u.r); + goto next_endpoints; + } + switch (u.r.bRequest) { case USB_REQ_GET_STATUS: { struct net2280_ep *e; @@ -2360,8 +3229,11 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) VDEBUG(dev, "%s wedged, halt not cleared\n", ep->ep.name); } else { - VDEBUG(dev, "%s clear halt\n", ep->ep.name); + VDEBUG(dev, "%s clear halt\n", e->ep.name); clear_halt(e); + if (ep->dev->pdev->vendor == 0x10b5 && + !list_empty(&e->queue) && e->td_dma) + restart_dma(e); } allow_status (ep); goto next_endpoints; @@ -2381,6 +3253,8 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) if (e->ep.name == ep0name) goto do_stall; set_halt (e); + if (dev->pdev->vendor == 0x10b5 && e->dma) + abort_dma(e); allow_status (ep); VDEBUG (dev, "%s set halt\n", ep->ep.name); goto next_endpoints; @@ -2392,7 +3266,7 @@ delegate: "ep_cfg %08x\n", u.r.bRequestType, u.r.bRequest, w_value, w_index, w_length, - readl (&ep->regs->ep_cfg)); + readl(&ep->cfg->ep_cfg)); ep->responded = 0; spin_unlock (&dev->lock); tmp = dev->driver->setup (&dev->gadget, &u.r); @@ -2455,7 +3329,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat) /* after disconnect there's nothing else to do! */ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); - mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED); + mask = (1 << SUPER_SPEED) | (1 << HIGH_SPEED) | (1 << FULL_SPEED); /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and @@ -2546,12 +3420,19 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat) tmp = readl (&dma->dmastat); writel (tmp, &dma->dmastat); + /* dma sync*/ + if (dev->pdev->vendor == 0x10b5) { + u32 r_dmacount = readl(&dma->dmacount); + if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && + (tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))) + continue; + } + /* chaining should stop on abort, short OUT from fifo, * or (stat0 codepath) short OUT transfer. */ if (!use_dma_chaining) { - if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT)) - == 0) { + if (!(tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))) { DEBUG (ep->dev, "%s no xact done? 
%08x\n", ep->ep.name, tmp); continue; @@ -2625,7 +3506,8 @@ static irqreturn_t net2280_irq (int irq, void *_dev) struct net2280 *dev = _dev; /* shared interrupt, not ours */ - if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED))) + if (dev->pdev->vendor == 0x17cc && + (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))) return IRQ_NONE; spin_lock (&dev->lock); @@ -2636,6 +3518,13 @@ static irqreturn_t net2280_irq (int irq, void *_dev) /* control requests and PIO */ handle_stat0_irqs (dev, readl (&dev->regs->irqstat0)); + if (dev->pdev->vendor == 0x10b5) { + /* re-enable interrupt to trigger any possible new interrupt */ + u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); + writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); + writel(pciirqenb1, &dev->regs->pciirqenb1); + } + spin_unlock (&dev->lock); return IRQ_HANDLED; @@ -2674,6 +3563,8 @@ static void net2280_remove (struct pci_dev *pdev) } if (dev->got_irq) free_irq (pdev->irq, dev); + if (use_msi && dev->pdev->vendor == 0x10b5) + pci_disable_msi(pdev); if (dev->regs) iounmap (dev->regs); if (dev->region) @@ -2708,7 +3599,8 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init (&dev->lock); dev->pdev = pdev; dev->gadget.ops = &net2280_ops; - dev->gadget.max_speed = USB_SPEED_HIGH; + dev->gadget.max_speed = (dev->pdev->vendor == 0x10b5) ? + USB_SPEED_SUPER : USB_SPEED_HIGH; /* the "gadget" abstracts/virtualizes the controller */ dev->gadget.name = driver_name; @@ -2750,8 +3642,39 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); - /* put into initial config, link up all endpoints */ - writel (0, &dev->usb->usbctl); + if (dev->pdev->vendor == 0x10b5) { + u32 fsmvalue; + u32 usbstat; + dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) + (base + 0x00b4); + dev->fiforegs = (struct usb338x_fifo_regs __iomem *) + (base + 0x0500); + dev->llregs = (struct usb338x_ll_regs __iomem *) + (base + 0x0700); + dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) + (base + 0x0748); + dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) + (base + 0x077c); + dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) + (base + 0x079c); + dev->plregs = (struct usb338x_pl_regs __iomem *) + (base + 0x0800); + usbstat = readl(&dev->usb->usbstat); + dev->enhanced_mode = (usbstat & (1 << 11)) ? 1 : 0; + dev->n_ep = (dev->enhanced_mode) ? 
9 : 5; + /* put into initial config, link up all endpoints */ + fsmvalue = get_idx_reg(dev->regs, SCRATCH) & + (0xf << DEFECT7374_FSM_FIELD); + /* See if firmware needs to set up for workaround: */ + if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) + writel(0, &dev->usb->usbctl); + } else{ + dev->enhanced_mode = 0; + dev->n_ep = 7; + /* put into initial config, link up all endpoints */ + writel(0, &dev->usb->usbctl); + } + usb_reset (dev); usb_reinit (dev); @@ -2762,6 +3685,10 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) goto done; } + if (use_msi && dev->pdev->vendor == 0x10b5) + if (pci_enable_msi(pdev)) + ERROR(dev, "Failed to enable MSI mode\n"); + if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev) != 0) { ERROR (dev, "request interrupt %d failed\n", pdev->irq); @@ -2797,7 +3724,8 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) } /* enable lower-overhead pci memory bursts during DMA */ - writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) + if (dev->pdev->vendor == 0x17cc) + writel((1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) // 256 write retries may not be enough... // | (1 << PCI_RETRY_ABORT_ENABLE) | (1 << DMA_READ_MULTIPLE_ENABLE) @@ -2814,10 +3742,10 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) INFO (dev, "%s\n", driver_desc); INFO (dev, "irq %d, pci mem %p, chip rev %04x\n", pdev->irq, base, dev->chiprev); - INFO (dev, "version: " DRIVER_VERSION "; dma %s\n", - use_dma - ? (use_dma_chaining ? "chaining" : "enabled") - : "disabled"); + INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n", + use_dma ? (use_dma_chaining ? "chaining" : "enabled") + : "disabled", + dev->enhanced_mode ? "enhanced mode" : "legacy mode"); retval = device_create_file (&pdev->dev, &dev_attr_registers); if (retval) goto done; @@ -2849,7 +3777,8 @@ static void net2280_shutdown (struct pci_dev *pdev) writel (0, &dev->usb->usbctl); /* Disable full-speed test mode */ - writel(0, &dev->usb->xcvrdiag); + if (dev->pdev->vendor == 0x17cc) + writel(0, &dev->usb->xcvrdiag); } @@ -2869,8 +3798,24 @@ static const struct pci_device_id pci_ids [] = { { .device = 0x2282, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - -}, { /* end: all zeroes */ } +}, + { + .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), + .class_mask = ~0, + .vendor = 0x10b5, + .device = 0x3380, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { + .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), + .class_mask = ~0, + .vendor = 0x10b5, + .device = 0x3382, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, +{ /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h index a844be0d683a..f32c2746b6ae 100644 --- a/drivers/usb/gadget/net2280.h +++ b/drivers/usb/gadget/net2280.h @@ -6,6 +6,7 @@ /* * Copyright (C) 2002 NetChip Technology, Inc. 
(http://www.netchip.com) * Copyright (C) 2003 David Brownell + * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,6 +15,7 @@ */ #include +#include /*-------------------------------------------------------------------------*/ @@ -59,6 +61,13 @@ set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value) #define CHIPREV_1 0x0100 #define CHIPREV_1A 0x0110 +/* DEFECT 7374 */ +#define DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS 200 +#define DEFECT_7374_PROCESSOR_WAIT_TIME 10 + +/* ep0 max packet size */ +#define EP0_SS_MAX_PACKET_SIZE 0x200 +#define EP0_HS_MAX_PACKET_SIZE 0x40 #ifdef __KERNEL__ /* ep a-f highspeed and fullspeed maxpacket, addresses @@ -85,12 +94,15 @@ struct net2280_dma { struct net2280_ep { struct usb_ep ep; + struct net2280_ep_regs __iomem *cfg; struct net2280_ep_regs __iomem *regs; struct net2280_dma_regs __iomem *dma; struct net2280_dma *dummy; + struct usb338x_fifo_regs __iomem *fiforegs; dma_addr_t td_dma; /* of dummy */ struct net2280 *dev; unsigned long irqs; + unsigned is_halt:1, dma_started:1; /* analogous to a host-side qh */ struct list_head queue; @@ -116,10 +128,19 @@ static inline void allow_status (struct net2280_ep *ep) ep->stopped = 1; } -/* count (<= 4) bytes in the next fifo write will be valid */ -static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count) +static void allow_status_338x(struct net2280_ep *ep) { - writeb (count, 2 + (u8 __iomem *) &ep->regs->ep_cfg); + /* + * Control Status Phase Handshake was set by the chip when the setup + * packet arrived. While set, the chip automatically NAKs the host's + * Status Phase tokens. + */ + writel(1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE, &ep->regs->ep_rsp); + + ep->stopped = 1; + + /* TD 9.9 Halt Endpoint test. TD 9.22 set feature test. */ + ep->responded = 0; } struct net2280_request { @@ -135,23 +156,38 @@ struct net2280 { /* each pci device provides one gadget, several endpoints */ struct usb_gadget gadget; spinlock_t lock; - struct net2280_ep ep [7]; + struct net2280_ep ep[9]; struct usb_gadget_driver *driver; unsigned enabled : 1, protocol_stall : 1, softconnect : 1, got_irq : 1, - region : 1; + region:1, + u1_enable:1, + u2_enable:1, + ltm_enable:1, + wakeup_enable:1, + selfpowered:1, + addressed_state:1; u16 chiprev; + int enhanced_mode; + int n_ep; /* pci state used to access those endpoints */ struct pci_dev *pdev; struct net2280_regs __iomem *regs; struct net2280_usb_regs __iomem *usb; + struct usb338x_usb_ext_regs __iomem *usb_ext; struct net2280_pci_regs __iomem *pci; struct net2280_dma_regs __iomem *dma; struct net2280_dep_regs __iomem *dep; struct net2280_ep_regs __iomem *epregs; + struct usb338x_fifo_regs __iomem *fiforegs; + struct usb338x_ll_regs __iomem *llregs; + struct usb338x_ll_lfps_regs __iomem *ll_lfps_regs; + struct usb338x_ll_tsn_regs __iomem *ll_tsn_regs; + struct usb338x_ll_chi_regs __iomem *ll_chicken_reg; + struct usb338x_pl_regs __iomem *plregs; struct pci_pool *requests; // statistics... @@ -179,6 +215,43 @@ static inline void clear_halt (struct net2280_ep *ep) , &ep->regs->ep_rsp); } +/* + * FSM value for Defect 7374 (U1U2 Test) is managed in + * chip's SCRATCH register: + */ +#define DEFECT7374_FSM_FIELD 28 + +/* Waiting for Control Read: + * - A transition to this state indicates a fresh USB connection, + * before the first Setup Packet. The connection speed is not + * known. 
Firmware is waiting for the first Control Read. + * - Starting state: This state can be thought of as the FSM's typical + * starting state. + * - Tip: Upon the first SS Control Read the FSM never + * returns to this state. + */ +#define DEFECT7374_FSM_WAITING_FOR_CONTROL_READ (1 << DEFECT7374_FSM_FIELD) + +/* Non-SS Control Read: + * - A transition to this state indicates detection of the first HS + * or FS Control Read. + * - Tip: Upon the first SS Control Read the FSM never + * returns to this state. + */ +#define DEFECT7374_FSM_NON_SS_CONTROL_READ (2 << DEFECT7374_FSM_FIELD) + +/* SS Control Read: + * - A transition to this state indicates detection of the + * first SS Control Read. + * - This state indicates workaround completion. Workarounds no longer + * need to be applied (as long as the chip remains powered up). + * - Tip: Once in this state the FSM state does not change (until + * the chip's power is lost and restored). + * - This can be thought of as the final state of the FSM; + * the FSM 'locks-up' in this state until the chip loses power. + */ +#define DEFECT7374_FSM_SS_CONTROL_READ (3 << DEFECT7374_FSM_FIELD) + #ifdef USE_RDK_LEDS static inline void net2280_led_init (struct net2280 *dev) @@ -198,6 +271,9 @@ void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed) { u32 val = readl (&dev->regs->gpioctl); switch (speed) { + case USB_SPEED_SUPER: /* green + red */ + val |= (1 << GPIO0_DATA) | (1 << GPIO1_DATA); + break; case USB_SPEED_HIGH: /* green */ val &= ~(1 << GPIO0_DATA); val |= (1 << GPIO1_DATA); @@ -271,6 +347,17 @@ static inline void net2280_led_shutdown (struct net2280 *dev) /*-------------------------------------------------------------------------*/ +static inline void set_fifo_bytecount(struct net2280_ep *ep, unsigned count) +{ + if (ep->dev->pdev->vendor == 0x17cc) + writeb(count, 2 + (u8 __iomem *) &ep->regs->ep_cfg); + else{ + u32 tmp = readl(&ep->cfg->ep_cfg) & + (~(0x07 << EP_FIFO_BYTE_COUNT)); + writel(tmp | (count << EP_FIFO_BYTE_COUNT), &ep->cfg->ep_cfg); + } +} + static inline void start_out_naking (struct net2280_ep *ep) { /* NOTE: hardware races lurk here, and PING protocol issues */ diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h new file mode 100644 index 000000000000..f92eb635b9d3 --- /dev/null +++ b/include/linux/usb/usb338x.h @@ -0,0 +1,199 @@ +/* + * USB 338x super/high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + * + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_USB_USB338X_H +#define __LINUX_USB_USB338X_H + +#include + +/* + * Extra defined bits for net2280 registers + */ +#define SCRATCH 0x0b + +#define DEFECT7374_FSM_FIELD 28 +#define SUPER_SPEED 8 +#define DMA_REQUEST_OUTSTANDING 5 +#define DMA_PAUSE_DONE_INTERRUPT 26 +#define SET_ISOCHRONOUS_DELAY 24 +#define SET_SEL 22 +#define SUPER_SPEED_MODE 8 + +/*ep_cfg*/ +#define MAX_BURST_SIZE 24 +#define EP_FIFO_BYTE_COUNT 16 +#define IN_ENDPOINT_ENABLE 14 +#define IN_ENDPOINT_TYPE 12 +#define OUT_ENDPOINT_ENABLE 10 +#define OUT_ENDPOINT_TYPE 8 + +struct usb338x_usb_ext_regs { + u32 usbclass; +#define DEVICE_PROTOCOL 16 +#define DEVICE_SUB_CLASS 8 +#define DEVICE_CLASS 0 + u32 ss_sel; +#define U2_SYSTEM_EXIT_LATENCY 8 +#define U1_SYSTEM_EXIT_LATENCY 0 + u32 ss_del; +#define U2_DEVICE_EXIT_LATENCY 8 +#define U1_DEVICE_EXIT_LATENCY 0 + u32 usb2lpm; +#define USB_L1_LPM_HIRD 2 +#define USB_L1_LPM_REMOTE_WAKE 1 +#define USB_L1_LPM_SUPPORT 0 + u32 usb3belt; +#define BELT_MULTIPLIER 10 +#define BEST_EFFORT_LATENCY_TOLERANCE 0 + u32 usbctl2; +#define LTM_ENABLE 7 +#define U2_ENABLE 6 +#define U1_ENABLE 5 +#define FUNCTION_SUSPEND 4 +#define USB3_CORE_ENABLE 3 +#define USB2_CORE_ENABLE 2 +#define SERIAL_NUMBER_STRING_ENABLE 0 + u32 in_timeout; +#define GPEP3_TIMEOUT 19 +#define GPEP2_TIMEOUT 18 +#define GPEP1_TIMEOUT 17 +#define GPEP0_TIMEOUT 16 +#define GPEP3_TIMEOUT_VALUE 13 +#define GPEP3_TIMEOUT_ENABLE 12 +#define GPEP2_TIMEOUT_VALUE 9 +#define GPEP2_TIMEOUT_ENABLE 8 +#define GPEP1_TIMEOUT_VALUE 5 +#define GPEP1_TIMEOUT_ENABLE 4 +#define GPEP0_TIMEOUT_VALUE 1 +#define GPEP0_TIMEOUT_ENABLE 0 + u32 isodelay; +#define ISOCHRONOUS_DELAY 0 +} __packed; + +struct usb338x_fifo_regs { + /* offset 0x0500, 0x0520, 0x0540, 0x0560, 0x0580 */ + u32 ep_fifo_size_base; +#define IN_FIFO_BASE_ADDRESS 22 +#define IN_FIFO_SIZE 16 +#define OUT_FIFO_BASE_ADDRESS 6 +#define OUT_FIFO_SIZE 0 + u32 ep_fifo_out_wrptr; + u32 ep_fifo_out_rdptr; + u32 ep_fifo_in_wrptr; + u32 ep_fifo_in_rdptr; + u32 unused[3]; +} __packed; + + +/* Link layer */ +struct usb338x_ll_regs { + /* offset 0x700 */ + u32 ll_ltssm_ctrl1; + u32 ll_ltssm_ctrl2; + u32 ll_ltssm_ctrl3; + u32 unused[2]; + u32 ll_general_ctrl0; + u32 ll_general_ctrl1; +#define PM_U3_AUTO_EXIT 29 +#define PM_U2_AUTO_EXIT 28 +#define PM_U1_AUTO_EXIT 27 +#define PM_FORCE_U2_ENTRY 26 +#define PM_FORCE_U1_ENTRY 25 +#define PM_LGO_COLLISION_SEND_LAU 24 +#define PM_DIR_LINK_REJECT 23 +#define PM_FORCE_LINK_ACCEPT 22 +#define PM_DIR_ENTRY_U3 20 +#define PM_DIR_ENTRY_U2 19 +#define PM_DIR_ENTRY_U1 18 +#define PM_U2_ENABLE 17 +#define PM_U1_ENABLE 16 +#define SKP_THRESHOLD_ADJUST_FMW 8 +#define RESEND_DPP_ON_LRTY_FMW 7 +#define DL_BIT_VALUE_FMW 6 +#define FORCE_DL_BIT 5 + u32 ll_general_ctrl2; +#define SELECT_INVERT_LANE_POLARITY 7 +#define FORCE_INVERT_LANE_POLARITY 6 + u32 ll_general_ctrl3; + u32 ll_general_ctrl4; + u32 ll_error_gen; +} __packed; + +struct usb338x_ll_lfps_regs { + /* offset 0x748 */ + u32 ll_lfps_5; +#define TIMER_LFPS_6US 16 + u32 ll_lfps_6; +#define TIMER_LFPS_80US 0 +} __packed; + +struct usb338x_ll_tsn_regs { + /* offset 0x77C */ + u32 ll_tsn_counters_2; +#define HOT_TX_NORESET_TS2 24 + u32 ll_tsn_counters_3; +#define HOT_RX_RESET_TS2 0 +} __packed; + +struct usb338x_ll_chi_regs { + /* offset 0x79C */ + u32 ll_tsn_chicken_bit; +#define RECOVERY_IDLE_TO_RECOVER_FMW 3 +} __packed; + +/* protocol layer */ +struct usb338x_pl_regs { + /* offset 0x800 */ + u32 pl_reg_1; + u32 pl_reg_2; + u32 pl_reg_3; + u32 pl_reg_4; + u32 pl_ep_ctrl; + /* Protocol 
Layer Endpoint Control*/ +#define PL_EP_CTRL 0x810 +#define ENDPOINT_SELECT 0 + /* [4:0] */ +#define EP_INITIALIZED 16 +#define SEQUENCE_NUMBER_RESET 17 +#define CLEAR_ACK_ERROR_CODE 20 + u32 pl_reg_6; + u32 pl_reg_7; + u32 pl_reg_8; + u32 pl_ep_status_1; + /* Protocol Layer Endpoint Status 1*/ +#define PL_EP_STATUS_1 0x820 +#define STATE 16 +#define ACK_GOOD_NORMAL 0x11 +#define ACK_GOOD_MORE_ACKS_TO_COME 0x16 + u32 pl_ep_status_2; + u32 pl_ep_status_3; + /* Protocol Layer Endpoint Status 3*/ +#define PL_EP_STATUS_3 0x828 +#define SEQUENCE_NUMBER 0 + u32 pl_ep_status_4; + /* Protocol Layer Endpoint Status 4*/ +#define PL_EP_STATUS_4 0x82c + u32 pl_ep_cfg_4; + /* Protocol Layer Endpoint Configuration 4*/ +#define PL_EP_CFG_4 0x830 +#define NON_CTRL_IN_TOLERATE_BAD_DIR 6 +} __packed; + +#endif /* __LINUX_USB_USB338X_H */ -- cgit v1.2.3 From d97ffe236894856d08146390ef3fbe6448a8ac2b Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Wed, 21 May 2014 15:23:30 +1000 Subject: PCI: Fix return value from pci_user_{read,write}_config_*() The PCI user-space config accessors pci_user_{read,write}_config_*() return negative error numbers, which were introduced by commit 34e3207205ef ("PCI: handle positive error codes"). That patch converted all positive error numbers from platform-specific PCI config accessors to -EINVAL, which means the callers don't know anything about the specific cause of the failure. The patch fixes the issue by converting the positive PCIBIOS_* error values to generic negative error numbers with pcibios_err_to_errno(). [bhelgaas: changelog] Signed-off-by: Gavin Shan Signed-off-by: Bjorn Helgaas Acked-by: Greg Thelen --- drivers/pci/access.c | 12 ++++-------- include/linux/pci.h | 4 ++-- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 7f8b78c08879..8c148f39e8d7 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -148,7 +148,7 @@ static noinline void pci_wait_cfg(struct pci_dev *dev) int pci_user_read_config_##size \ (struct pci_dev *dev, int pos, type *val) \ { \ - int ret = 0; \ + int ret = PCIBIOS_SUCCESSFUL; \ u32 data = -1; \ if (PCI_##size##_BAD) \ return -EINVAL; \ @@ -159,9 +159,7 @@ int pci_user_read_config_##size \ pos, sizeof(type), &data); \ raw_spin_unlock_irq(&pci_lock); \ *val = (type)data; \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); @@ -170,7 +168,7 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); int pci_user_write_config_##size \ (struct pci_dev *dev, int pos, type val) \ { \ - int ret = -EIO; \ + int ret = PCIBIOS_SUCCESSFUL; \ if (PCI_##size##_BAD) \ return -EINVAL; \ raw_spin_lock_irq(&pci_lock); \ @@ -179,9 +177,7 @@ int pci_user_write_config_##size \ ret = dev->bus->ops->write(dev->bus, dev->devfn, \ pos, sizeof(type), val); \ raw_spin_unlock_irq(&pci_lock); \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_write_config_##size); diff --git a/include/linux/pci.h b/include/linux/pci.h index 018877b8b4e8..322335aaa7e1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -518,7 +518,7 @@ static inline int pcibios_err_to_errno(int err) case PCIBIOS_FUNC_NOT_SUPPORTED: return -ENOENT; case PCIBIOS_BAD_VENDOR_ID: - return -EINVAL; + return -ENOTTY; case PCIBIOS_DEVICE_NOT_FOUND: return -ENODEV; case PCIBIOS_BAD_REGISTER_NUMBER: @@ -529,7 +529,7 @@ static inline int 
pcibios_err_to_errno(int err) return -ENOSPC; } - return -ENOTTY; + return -ERANGE; } /* Low-level architecture-dependent routines */ -- cgit v1.2.3 From 6fecd4f2a58c60028b1a75deefcf111516d3f836 Mon Sep 17 00:00:00 2001 From: Todd E Brandt Date: Mon, 19 May 2014 10:55:32 -0700 Subject: USB: separate usb_address0 mutexes for each bus This patch creates a separate instance of the usb_address0 mutex for each USB bus, and attaches it to the usb_bus device struct. This allows devices on separate buses to be enumerated in parallel; saving time. In the current code, there is a single, global instance of the usb_address0 mutex which is used for all devices on all buses. This isn't completely necessary, as this mutex is only needed to prevent address0 collisions for devices on the *same* bus (usb 2.0 spec, sec 4.6.1). This superfluous coverage can cause additional delay in system resume on systems with multiple hosts (up to several seconds depending on what devices are attached). Signed-off-by: Todd Brandt Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd.c | 1 + drivers/usb/core/hub.c | 6 ++---- include/linux/usb.h | 2 ++ 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index adddc66c9e8d..174eb857a6b4 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -918,6 +918,7 @@ static void usb_bus_init (struct usb_bus *bus) bus->bandwidth_allocated = 0; bus->bandwidth_int_reqs = 0; bus->bandwidth_isoc_reqs = 0; + mutex_init(&bus->usb_address0_mutex); INIT_LIST_HEAD (&bus->bus_list); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 090469ebfcff..726fa072c3fe 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4016,8 +4016,6 @@ static int hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, int retry_counter) { - static DEFINE_MUTEX(usb_address0_mutex); - struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); int i, j, retval; @@ -4040,7 +4038,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME; - mutex_lock(&usb_address0_mutex); + mutex_lock(&hdev->bus->usb_address0_mutex); /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ @@ -4317,7 +4315,7 @@ fail: hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } - mutex_unlock(&usb_address0_mutex); + mutex_unlock(&hdev->bus->usb_address0_mutex); return retval; } diff --git a/include/linux/usb.h b/include/linux/usb.h index 6b7ec376fb4d..d2465bc0e73c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -352,6 +352,8 @@ struct usb_bus { struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ struct list_head bus_list; /* list of busses */ + struct mutex usb_address0_mutex; /* unaddressed device mutex */ + int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? -- cgit v1.2.3 From 95f096849932fe5eaa7bfec887530cf556744a76 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 27 May 2014 17:46:48 -0600 Subject: blk-mq: allow non-softirq completions Right now we export two ways of completing a request: 1) blk_mq_complete_request(). This uses an IPI (if needed) and completes through q->softirq_done_fn(). It also works with timeouts. 2) blk_mq_end_io(). 
This completes inline, and ignores any timeout state of the request. Let blk_mq_complete_request() handle non-softirq_done_fn completions as well, by just completing inline. If a driver has enough completion ports to place completions correctly, it need not define a mq_ops->complete() and we can avoid an indirect function call by doing the completion inline. Signed-off-by: Jens Axboe --- block/blk-mq.c | 12 +++++++++--- include/linux/blk-mq.h | 4 ++++ 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 30bad930e661..010b878d53b3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -434,10 +434,16 @@ void __blk_mq_complete_request(struct request *rq) **/ void blk_mq_complete_request(struct request *rq) { - if (unlikely(blk_should_fake_timeout(rq->q))) + struct request_queue *q = rq->q; + + if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) - __blk_mq_complete_request(rq); + if (!blk_mark_rq_complete(rq)) { + if (q->softirq_done_fn) + __blk_mq_complete_request(rq); + else + blk_mq_end_io(rq, rq->errors); + } } EXPORT_SYMBOL(blk_mq_complete_request); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index afeb93496907..1dfeb1529a61 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -173,6 +173,10 @@ void __blk_mq_end_io(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); +/* + * Complete request through potential IPI for right placement. Driver must + * have defined a mq_ops->complete() hook for this. + */ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3 From 4ec65b77c64504e178d75aaba6ac96f68837416c Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 24 Apr 2014 14:35:25 +0200 Subject: mcb: Add support for shared PCI IRQs Add support for shared PCI IRQs to mcb and mcb-pci. Signed-off-by: Johannes Thumshirn Signed-off-by: Greg Kroah-Hartman --- drivers/mcb/mcb-core.c | 20 +++++++++++++++----- drivers/mcb/mcb-pci.c | 17 ++++++++++++++++- include/linux/mcb.h | 6 +++++- 3 files changed, 36 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index bbe12932d404..9018ab83517a 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -183,14 +183,14 @@ EXPORT_SYMBOL_GPL(mcb_device_register); * * Allocate a new @mcb_bus. 
*/ -struct mcb_bus *mcb_alloc_bus(void) +struct mcb_bus *mcb_alloc_bus(struct device *carrier) { struct mcb_bus *bus; int bus_nr; bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL); if (!bus) - return NULL; + return ERR_PTR(-ENOMEM); bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); if (bus_nr < 0) { @@ -200,7 +200,7 @@ struct mcb_bus *mcb_alloc_bus(void) INIT_LIST_HEAD(&bus->children); bus->bus_nr = bus_nr; - + bus->carrier = carrier; return bus; } EXPORT_SYMBOL_GPL(mcb_alloc_bus); @@ -378,6 +378,13 @@ void mcb_release_mem(struct resource *mem) } EXPORT_SYMBOL_GPL(mcb_release_mem); +static int __mcb_get_irq(struct mcb_device *dev) +{ + struct resource *irq = &dev->irq; + + return irq->start; +} + /** * mcb_get_irq() - Get device's IRQ number * @dev: The @mcb_device the IRQ is for @@ -386,9 +393,12 @@ EXPORT_SYMBOL_GPL(mcb_release_mem); */ int mcb_get_irq(struct mcb_device *dev) { - struct resource *irq = &dev->irq; + struct mcb_bus *bus = dev->bus; - return irq->start; + if (bus->get_irq) + return bus->get_irq(dev); + + return __mcb_get_irq(dev); } EXPORT_SYMBOL_GPL(mcb_get_irq); diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index 99c742cbfb5b..b59181965643 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -20,6 +20,15 @@ struct priv { void __iomem *base; }; +static int mcb_pci_get_irq(struct mcb_device *mdev) +{ + struct mcb_bus *mbus = mdev->bus; + struct device *dev = mbus->carrier; + struct pci_dev *pdev = to_pci_dev(dev); + + return pdev->irq; +} + static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct priv *priv; @@ -67,7 +76,13 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, priv); - priv->bus = mcb_alloc_bus(); + priv->bus = mcb_alloc_bus(&pdev->dev); + if (IS_ERR(priv->bus)) { + ret = PTR_ERR(priv->bus); + goto err_drvdata; + } + + priv->bus->get_irq = mcb_pci_get_irq; ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); if (ret < 0) diff --git a/include/linux/mcb.h b/include/linux/mcb.h index 2db284d14064..ed06e15a36aa 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -16,6 +16,7 @@ #include struct mcb_driver; +struct mcb_device; /** * struct mcb_bus - MEN Chameleon Bus @@ -23,11 +24,14 @@ struct mcb_driver; * @dev: pointer to carrier device * @children: the child busses * @bus_nr: mcb bus number + * @get_irq: callback to get IRQ number */ struct mcb_bus { struct list_head children; struct device dev; + struct device *carrier; int bus_nr; + int (*get_irq)(struct mcb_device *dev); }; #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) @@ -105,7 +109,7 @@ extern void mcb_unregister_driver(struct mcb_driver *driver); module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); extern void mcb_bus_add_devices(const struct mcb_bus *bus); extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); -extern struct mcb_bus *mcb_alloc_bus(void); +extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); extern struct mcb_bus *mcb_bus_get(struct mcb_bus *bus); extern void mcb_bus_put(struct mcb_bus *bus); extern struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus); -- cgit v1.2.3 From f82dd4b093ead1161770de70515cb11602ac664c Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Fri, 16 May 2014 04:36:13 -0400 Subject: miscdevice.h: Simple syntax fix to make pointers consistent. Signed-off-by: Robert P. J. 
Day Signed-off-by: Greg Kroah-Hartman --- include/linux/miscdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 51e26f3cd3b3..ee80dd7d9f60 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -64,7 +64,7 @@ struct miscdevice { umode_t mode; }; -extern int misc_register(struct miscdevice * misc); +extern int misc_register(struct miscdevice *misc); extern int misc_deregister(struct miscdevice *misc); #define MODULE_ALIAS_MISCDEV(minor) \ -- cgit v1.2.3 From 7738dac4f697ffbd0ed4c4aeb69a714ef9d876da Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 28 May 2014 08:06:34 -0600 Subject: blk-mq: remove stale comment for blk_mq_complete_request() It works for both IPI and local completions as of commit 95f096849932. Signed-off-by: Jens Axboe --- include/linux/blk-mq.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 1dfeb1529a61..5b171fbe95c5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -172,11 +172,6 @@ void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); - -/* - * Complete request through potential IPI for right placement. Driver must - * have defined a mq_ops->complete() hook for this. - */ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3 From 6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 28 May 2014 08:08:02 -0600 Subject: blk-mq: add helper to insert requests from irq context Both the cache flush state machine and the SCSI midlayer want to submit requests from irq context, and the current per-request requeue_work unfortunately causes corruption due to sharing with the csd field for flushes. Replace them with a per-request_queue list of requests to be requeued. Based on an earlier test by Ming Lei. 
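For illustration only, here is a minimal caller-side sketch of the new pattern for deferring work out of hard-irq context; the wrapper name example_requeue_from_irq() is hypothetical, while blk_mq_add_to_requeue_list() and blk_mq_kick_requeue_list() are the helpers added below:

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Hypothetical sketch: park a request while still in irq context. */
	static void example_requeue_from_irq(struct request *rq)
	{
		struct request_queue *q = rq->q;

		/* put the request on the per-queue requeue list ... */
		blk_mq_add_to_requeue_list(rq, false);
		/* ... and let kblockd re-insert it and re-run the queue later */
		blk_mq_kick_requeue_list(q);
	}

The flush machinery below uses exactly this pair in blk_flush_queue_rq().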
Signed-off-by: Christoph Hellwig Reported-by: Ming Lei Tested-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-flush.c | 16 ++++--------- block/blk-mq.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/blk-mq.h | 2 ++ include/linux/blkdev.h | 5 +++- 4 files changed, 73 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/block/blk-flush.c b/block/blk-flush.c index ec7a224d6733..ef608b35d9be 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq) blk_clear_rq_complete(rq); } -static void mq_flush_run(struct work_struct *work) -{ - struct request *rq; - - rq = container_of(work, struct request, requeue_work); - - memset(&rq->csd, 0, sizeof(rq->csd)); - blk_mq_insert_request(rq, false, true, false); -} - static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { - INIT_WORK(&rq->requeue_work, mq_flush_run); - kblockd_schedule_work(&rq->requeue_work); + struct request_queue *q = rq->q; + + blk_mq_add_to_requeue_list(rq, add_front); + blk_mq_kick_requeue_list(q); return false; } else { if (add_front) diff --git a/block/blk-mq.c b/block/blk-mq.c index 010b878d53b3..67066ecc79c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -516,10 +516,68 @@ void blk_mq_requeue_request(struct request *rq) blk_clear_rq_complete(rq); BUG_ON(blk_queued_rq(rq)); - blk_mq_insert_request(rq, true, true, false); + blk_mq_add_to_requeue_list(rq, true); } EXPORT_SYMBOL(blk_mq_requeue_request); +static void blk_mq_requeue_work(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, requeue_work); + LIST_HEAD(rq_list); + struct request *rq, *next; + unsigned long flags; + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + list_for_each_entry_safe(rq, next, &rq_list, queuelist) { + if (!(rq->cmd_flags & REQ_SOFTBARRIER)) + continue; + + rq->cmd_flags &= ~REQ_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, true, false, false); + } + + while (!list_empty(&rq_list)) { + rq = list_entry(rq_list.next, struct request, queuelist); + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, false, false, false); + } + + blk_mq_run_queues(q, false); +} + +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + /* + * We abuse this flag that is otherwise used by the I/O scheduler to + * request head insertation from the workqueue. 
+ */ + BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); + + spin_lock_irqsave(&q->requeue_lock, flags); + if (at_head) { + rq->cmd_flags |= REQ_SOFTBARRIER; + list_add(&rq->queuelist, &q->requeue_list); + } else { + list_add_tail(&rq->queuelist, &q->requeue_list); + } + spin_unlock_irqrestore(&q->requeue_lock, flags); +} +EXPORT_SYMBOL(blk_mq_add_to_requeue_list); + +void blk_mq_kick_requeue_list(struct request_queue *q) +{ + kblockd_schedule_work(&q->requeue_work); +} +EXPORT_SYMBOL(blk_mq_kick_requeue_list); + struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { return tags->rqs[tag]; @@ -1812,6 +1870,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->sg_reserved_size = INT_MAX; + INIT_WORK(&q->requeue_work, blk_mq_requeue_work); + INIT_LIST_HEAD(&q->requeue_list); + spin_lock_init(&q->requeue_lock); + if (q->nr_hw_queues > 1) blk_queue_make_request(q, blk_mq_make_request); else diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 5b171fbe95c5..b9a74a386dbc 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -172,6 +172,8 @@ void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6bc011a09e82..913f1c2d3be0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -99,7 +99,6 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct requeue_work; unsigned long fifo_time; }; @@ -463,6 +462,10 @@ struct request_queue { struct request *flush_rq; spinlock_t mq_flush_lock; + struct list_head requeue_list; + spinlock_t requeue_lock; + struct work_struct requeue_work; + struct mutex sysfs_lock; int bypass_depth; -- cgit v1.2.3 From 4ce01dd1a07d9cf3eaf44fbf4ea9a61b11badccc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:46 +0200 Subject: blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Instead of having two almost identical copies of the same code just let the callers pass in the reserved flag directly. 
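As a hedged illustration of the caller-side change (the function name and allocation flags here are made up; the four-argument prototype is taken from the hunks below), a reserved-tag allocation now reads:

	#include <linux/blk-mq.h>

	/* Hypothetical sketch: grab a request from the reserved tag pool. */
	static struct request *example_get_reserved(struct request_queue *q)
	{
		/* was: blk_mq_alloc_reserved_request(q, WRITE, GFP_KERNEL) */
		return blk_mq_alloc_request(q, WRITE, GFP_KERNEL, true);
	}

Ordinary allocations simply pass false, as blk_get_request() does below.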
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq.c | 20 +++----------------- include/linux/blk-mq.h | 4 ++-- 3 files changed, 6 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 29d5fbafd94a..d87be5b4e554 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1173,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { if (q->mq_ops) - return blk_mq_alloc_request(q, rw, gfp_mask); + return blk_mq_alloc_request(q, rw, gfp_mask, false); else return blk_old_get_request(q, rw, gfp_mask); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 67066ecc79c0..63d581d72a70 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -294,35 +294,21 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, return rq; } -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, + bool reserved) { struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); + rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); if (rq) blk_mq_put_ctx(rq->mq_ctx); return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, - gfp_t gfp) -{ - struct request *rq; - - if (blk_mq_queue_enter(q)) - return NULL; - - rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); - if (rq) - blk_mq_put_ctx(rq->mq_ctx); - return rq; -} -EXPORT_SYMBOL(blk_mq_alloc_reserved_request); - static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b9a74a386dbc..2bd82f399128 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -160,8 +160,8 @@ void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, + gfp_t gfp, bool reserved); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -- cgit v1.2.3 From cdef54dd85ad66e77262ea57796a3e81683dd5d6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 28 May 2014 18:11:06 +0200 Subject: blk-mq: remove alloc_hctx and free_hctx methods There is no need for drivers to control hardware context allocation now that we do the context to node mapping in common code. 
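For reference, a sketch of what a driver's ops table can look like once these hooks are gone; the example_* names are hypothetical, and the remaining mandatory callbacks match the relaxed check in blk_mq_alloc_tag_set() below:

	#include <linux/blk-mq.h>

	/* Hypothetical queue_rq hook that completes every request at once. */
	static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
	{
		blk_mq_end_io(rq, 0);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	static struct blk_mq_ops example_mq_ops = {
		.queue_rq	= example_queue_rq,
		.map_queue	= blk_mq_map_queue,
		/* no .alloc_hctx / .free_hctx: the core kzalloc's each hctx */
	};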
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 26 +++++--------------------- drivers/block/null_blk.c | 28 +--------------------------- drivers/block/virtio_blk.c | 2 -- include/linux/blk-mq.h | 10 ---------- 4 files changed, 6 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5cc4b871cb11..f27fe44230c2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) } EXPORT_SYMBOL(blk_mq_map_queue); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, - unsigned int hctx_index, - int node) -{ - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); -} -EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); - -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, - unsigned int hctx_index) -{ - kfree(hctx); -} -EXPORT_SYMBOL(blk_mq_free_single_hw_queue); - static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { @@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q, queue_for_each_hw_ctx(q, hctx, i) { free_cpumask_var(hctx->cpumask); - set->ops->free_hctx(hctx, i); + kfree(hctx); } } @@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) { int node = blk_mq_hw_queue_to_node(map, i); - hctxs[i] = set->ops->alloc_hctx(set, i, node); + hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), + GFP_KERNEL, node); if (!hctxs[i]) goto err_hctxs; @@ -1898,7 +1884,7 @@ err_hctxs: if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); - set->ops->free_hctx(hctxs[i], i); + kfree(hctxs[i]); } err_map: kfree(hctxs); @@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) return -EINVAL; - if (!set->nr_hw_queues || - !set->ops->queue_rq || !set->ops->map_queue || - !set->ops->alloc_hctx || !set->ops->free_hctx) + if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue) return -EINVAL; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 4d33c8c25fbf..b40af63a5476 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, - unsigned int hctx_index, - int node) -{ - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); -} - -static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) -{ - kfree(hctx); -} - static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { BUG_ON(!nullb); @@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = { .map_queue = blk_mq_map_queue, .init_hctx = null_init_hctx, .complete = null_softirq_done_fn, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, -}; - -static struct blk_mq_ops null_mq_ops_pernode = { - .queue_rq = null_queue_rq, - .map_queue = blk_mq_map_queue, - .init_hctx = null_init_hctx, - .complete = null_softirq_done_fn, - .alloc_hctx = null_alloc_hctx, - .free_hctx = null_free_hctx, }; static void null_del_dev(struct nullb *nullb) @@ -496,10 +473,7 @@ static int null_add_dev(void) goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - if (use_per_node_hctx) - nullb->tag_set.ops = 
&null_mq_ops_pernode; - else - nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.ops = &null_mq_ops; nullb->tag_set.nr_hw_queues = submit_queues; nullb->tag_set.queue_depth = hw_queue_depth; nullb->tag_set.numa_node = home_node; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7a51f065edcd..16c21c0cb14d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq, static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .map_queue = blk_mq_map_queue, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, .complete = virtblk_request_done, .init_request = virtblk_init_request, }; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2bd82f399128..91dfb75ce39f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -79,9 +79,6 @@ struct blk_mq_tag_set { typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); -typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, - unsigned int, int); -typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_request_fn)(void *, struct request *, unsigned int, @@ -107,12 +104,6 @@ struct blk_mq_ops { softirq_done_fn *complete; - /* - * Override for hctx allocations (should probably go) - */ - alloc_hctx_fn *alloc_hctx; - free_hctx_fn *free_hctx; - /* * Called when the block layer side of a hardware queue has been * set up, allowing the driver to allocate/init matching structures. @@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); -- cgit v1.2.3 From d660e92a97aac08aa33cd41e00a325066e00f1ef Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 27 May 2014 17:37:29 +0530 Subject: regulators: Add definition of regulator_set_voltage_time() for !CONFIG_REGULATOR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already have dummy implementation for most of the regulators APIs for !CONFIG_REGULATOR case and were missing it for regulator_set_voltage_time(). Found this issue while compiling cpufreq-cpu0 driver without regulators support in kernel. drivers/cpufreq/cpufreq-cpu0.c: In function ‘cpu0_cpufreq_probe’: drivers/cpufreq/cpufreq-cpu0.c:186:3: error: implicit declaration of function ‘regulator_set_voltage_time’ [-Werror=implicit-function-declaration] Fix this by adding dummy definition for regulator_set_voltage_time(). 
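A hedged consumer-side sketch (the function and variable names are illustrative; the prototype matches the stub added below) of the call that now builds with or without CONFIG_REGULATOR:

	#include <linux/regulator/consumer.h>

	/* Hypothetical: time needed for a supply to ramp between setpoints. */
	static int example_ramp_time_us(struct regulator *supply,
					int old_uV, int new_uV)
	{
		/* the !CONFIG_REGULATOR stub below simply returns 0 */
		return regulator_set_voltage_time(supply, old_uV, new_uV);
	}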
Signed-off-by: Viresh Kumar Signed-off-by: Mark Brown --- include/linux/regulator/consumer.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index e530681bea70..94719e8dce04 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -397,6 +397,12 @@ static inline int regulator_set_voltage(struct regulator *regulator, return 0; } +static inline int regulator_set_voltage_time(struct regulator *regulator, + int old_uV, int new_uV) +{ + return 0; +} + static inline int regulator_get_voltage(struct regulator *regulator) { return -EINVAL; -- cgit v1.2.3 From 357d596ea7bea5abf1479cc72ae5888c738717dd Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 28 May 2014 11:35:41 -0700 Subject: Revert "usb: gadget: net2280: Add support for PLX USB338X" This reverts commit c4128cac3557ddd5fa972cb6511c426cd94a7ccd. This should come through Felipe's tree first, and there was a bunch of other patches that are needed after this one as well that I didn't have. Cc: Ricardo Ribalda Delgado Cc: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/Kconfig | 10 +- drivers/usb/gadget/net2280.c | 1115 ++++-------------------------------------- drivers/usb/gadget/net2280.h | 97 +--- include/linux/usb/usb338x.h | 199 -------- 4 files changed, 91 insertions(+), 1330 deletions(-) delete mode 100644 include/linux/usb/usb338x.h (limited to 'include/linux') diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 49e434ec527d..ba18e9c110cc 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -409,7 +409,7 @@ config USB_NET2272_DMA If unsure, say "N" here. The driver works fine in PIO mode. config USB_NET2280 - tristate "NetChip 228x / PLX USB338x" + tristate "NetChip 228x" depends on PCI help NetChip 2280 / 2282 is a PCI based USB peripheral controller which @@ -419,14 +419,6 @@ config USB_NET2280 (for control transfers) and several endpoints with dedicated functions. - PLX 3380 / 3382 is a PCIe based USB peripheral controller which - supports full, high speed USB 2.0 and super speed USB 3.0 - data transfers. - - It has eight configurable endpoints, as well as endpoint zero - (for control transfers) and several endpoints with dedicated - functions. - Say "y" to link the driver statically, or "m" to build a dynamically linked module called "net2280" and force all gadget drivers to also be dynamically linked. diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 87789c9bf7fe..300b3a71383b 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -18,9 +18,6 @@ * hint to completely eliminate some IRQs, if a later IRQ is guaranteed * and DMA chaining is enabled. * - * MSI is enabled by default. The legacy IRQ is used if MSI couldn't - * be enabled. - * * Note that almost all the errata workarounds here are only needed for * rev1 chips. Rev1a silicon (0110) fixes almost all of them. */ @@ -28,14 +25,10 @@ /* * Copyright (C) 2003 David Brownell * Copyright (C) 2003-2005 PLX Technology, Inc. - * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS * * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility * with 2282 chip * - * Modified Ricardo Ribalda Qtechnology AS to provide compatibility - * with usb 338x chip. 
Based on PLX driver - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -68,8 +61,9 @@ #include #include -#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller" -#define DRIVER_VERSION "2005 Sept 27/v3.0" + +#define DRIVER_DESC "PLX NET228x USB Peripheral Controller" +#define DRIVER_VERSION "2005 Sept 27" #define EP_DONTUSE 13 /* nonzero */ @@ -79,12 +73,11 @@ static const char driver_name [] = "net2280"; static const char driver_desc [] = DRIVER_DESC; -static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 }; static const char ep0name [] = "ep0"; static const char *const ep_name [] = { ep0name, "ep-a", "ep-b", "ep-c", "ep-d", - "ep-e", "ep-f", "ep-g", "ep-h", + "ep-e", "ep-f", }; /* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO) @@ -97,12 +90,11 @@ static const char *const ep_name [] = { */ static bool use_dma = 1; static bool use_dma_chaining = 0; -static bool use_msi = 1; /* "modprobe net2280 use_dma=n" etc */ module_param (use_dma, bool, S_IRUGO); module_param (use_dma_chaining, bool, S_IRUGO); -module_param(use_msi, bool, S_IRUGO); + /* mode 0 == ep-{a,b,c,d} 1K fifo each * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable @@ -156,9 +148,6 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) struct net2280_ep *ep; u32 max, tmp; unsigned long flags; - static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; - static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, - 0x50, 0x20, 0x70, 0x40, 0x90 }; ep = container_of (_ep, struct net2280_ep, ep); if (!_ep || !desc || ep->desc || _ep->name == ep0name @@ -172,20 +161,11 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) return -EDOM; - if (dev->pdev->vendor == 0x10b5) { - if ((desc->bEndpointAddress & 0x0f) >= 0x0c) - return -EDOM; - ep->is_in = !!usb_endpoint_dir_in(desc); - if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) - return -EINVAL; - } - /* sanity check ep-e/ep-f since their fifos are small */ max = usb_endpoint_maxp (desc) & 0x1fff; - if (ep->num > 4 && max > 64 && (dev->pdev->vendor == 0x17cc)) + if (ep->num > 4 && max > 64) return -ERANGE; - spin_lock_irqsave (&dev->lock, flags); _ep->maxpacket = max & 0x7ff; ep->desc = desc; @@ -196,8 +176,7 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) ep->out_overflow = 0; /* set speed-dependent max packet; may kick in high bandwidth */ - set_idx_reg(dev->regs, (dev->enhanced_mode) ? ep_enhanced[ep->num] - : REG_EP_MAXPKT(dev, ep->num), max); + set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max); /* FIFO lines can't go to different packets. PIO is ok, so * use it instead of troublesome (non-bulk) multi-packet DMA. 
@@ -220,43 +199,23 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) &ep->regs->ep_rsp); } else if (tmp == USB_ENDPOINT_XFER_BULK) { /* catch some particularly blatant driver bugs */ - if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) || - (dev->gadget.speed == USB_SPEED_HIGH && max != 512) || - (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { - spin_unlock_irqrestore(&dev->lock, flags); + if ((dev->gadget.speed == USB_SPEED_HIGH + && max != 512) + || (dev->gadget.speed == USB_SPEED_FULL + && max > 64)) { + spin_unlock_irqrestore (&dev->lock, flags); return -ERANGE; } } ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0; - /* Enable this endpoint */ - if (dev->pdev->vendor == 0x17cc) { - tmp <<= ENDPOINT_TYPE; - tmp |= desc->bEndpointAddress; - /* default full fifo lines */ - tmp |= (4 << ENDPOINT_BYTE_COUNT); - tmp |= 1 << ENDPOINT_ENABLE; - ep->is_in = (tmp & USB_DIR_IN) != 0; - } else { - /* In Legacy mode, only OUT endpoints are used */ - if (dev->enhanced_mode && ep->is_in) { - tmp <<= IN_ENDPOINT_TYPE; - tmp |= (1 << IN_ENDPOINT_ENABLE); - /* Not applicable to Legacy */ - tmp |= (1 << ENDPOINT_DIRECTION); - } else { - tmp <<= OUT_ENDPOINT_TYPE; - tmp |= (1 << OUT_ENDPOINT_ENABLE); - tmp |= (ep->is_in << ENDPOINT_DIRECTION); - } - - tmp |= usb_endpoint_num(desc); - tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); - } - - /* Make sure all the registers are written before ep_rsp*/ - wmb(); + tmp <<= ENDPOINT_TYPE; + tmp |= desc->bEndpointAddress; + tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */ + tmp |= 1 << ENDPOINT_ENABLE; + wmb (); /* for OUT transfers, block the rx fifo until a read is posted */ + ep->is_in = (tmp & USB_DIR_IN) != 0; if (!ep->is_in) writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); else if (dev->pdev->device != 0x2280) { @@ -267,13 +226,11 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); } - writel(tmp, &ep->cfg->ep_cfg); + writel (tmp, &ep->regs->ep_cfg); /* enable irqs */ if (!ep->dma) { /* pio, per-packet */ - tmp = (dev->pdev->vendor == 0x17cc)?(1 << ep->num) - : (1 << ep_bit[ep->num]); - tmp |= readl(&dev->regs->pciirqenb0); + tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); writel (tmp, &dev->regs->pciirqenb0); tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) @@ -294,10 +251,8 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); writel (tmp, &ep->regs->ep_irqenb); - tmp = (dev->pdev->vendor == 0x17cc)?(1 << ep->num) - : (1 << ep_bit[ep->num]); - tmp |= readl(&dev->regs->pciirqenb0); - writel(tmp, &dev->regs->pciirqenb0); + tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); + writel (tmp, &dev->regs->pciirqenb0); } } @@ -331,8 +286,7 @@ static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec) static const struct usb_ep_ops net2280_ep_ops; -static void ep_reset_228x(struct net2280_regs __iomem *regs, - struct net2280_ep *ep) +static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep) { u32 tmp; @@ -407,55 +361,6 @@ static void ep_reset_228x(struct net2280_regs __iomem *regs, /* fifo size is handled separately */ } -static void ep_reset_338x(struct net2280_regs __iomem *regs, - struct net2280_ep *ep) -{ - u32 tmp, dmastat; - - ep->desc = NULL; - INIT_LIST_HEAD(&ep->queue); - - usb_ep_set_maxpacket_limit(&ep->ep, ~0); - ep->ep.ops = &net2280_ep_ops; - - /* disable the dma, irqs, 
endpoint... */ - if (ep->dma) { - writel(0, &ep->dma->dmactl); - writel((1 << DMA_ABORT_DONE_INTERRUPT) | - (1 << DMA_PAUSE_DONE_INTERRUPT) | - (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT) | - (1 << DMA_TRANSACTION_DONE_INTERRUPT) - /* | (1 << DMA_ABORT) */ - , &ep->dma->dmastat); - - dmastat = readl(&ep->dma->dmastat); - if (dmastat == 0x5002) { - WARNING(ep->dev, "The dmastat return = %x!!\n", - dmastat); - writel(0x5a, &ep->dma->dmastat); - } - - tmp = readl(®s->pciirqenb0); - tmp &= ~(1 << ep_bit[ep->num]); - writel(tmp, ®s->pciirqenb0); - } else { - if (ep->num < 5) { - tmp = readl(®s->pciirqenb1); - tmp &= ~(1 << (8 + ep->num)); /* completion */ - writel(tmp, ®s->pciirqenb1); - } - } - writel(0, &ep->regs->ep_irqenb); - - writel((1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | - (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | - (1 << FIFO_OVERFLOW) | - (1 << DATA_PACKET_RECEIVED_INTERRUPT) | - (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | - (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | - (1 << DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); -} - static void nuke (struct net2280_ep *); static int net2280_disable (struct usb_ep *_ep) @@ -469,17 +374,13 @@ static int net2280_disable (struct usb_ep *_ep) spin_lock_irqsave (&ep->dev->lock, flags); nuke (ep); - - if (ep->dev->pdev->vendor == 0x10b5) - ep_reset_338x(ep->dev->regs, ep); - else - ep_reset_228x(ep->dev->regs, ep); + ep_reset (ep->dev->regs, ep); VDEBUG (ep->dev, "disabled %s %s\n", ep->dma ? "dma" : "pio", _ep->name); /* synch memory views with the device */ - (void)readl(&ep->cfg->ep_cfg); + (void) readl (&ep->regs->ep_cfg); if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4) ep->dma = &ep->dev->dma [ep->num - 1]; @@ -797,8 +698,6 @@ static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma) writel (readl (&dma->dmastat), &dma->dmastat); writel (td_dma, &dma->dmadesc); - if (ep->dev->pdev->vendor == 0x10b5) - dmactl |= (0x01 << DMA_REQUEST_OUTSTANDING); writel (dmactl, &dma->dmactl); /* erratum 0116 workaround part 3: pci arbiter away from net2280 */ @@ -873,21 +772,6 @@ static void start_dma (struct net2280_ep *ep, struct net2280_request *req) start_queue (ep, tmp, req->td_dma); } -static inline void resume_dma(struct net2280_ep *ep) -{ - writel(readl(&ep->dma->dmactl) | (1 << DMA_ENABLE), &ep->dma->dmactl); - - ep->dma_started = true; -} - -static inline void ep_stop_dma(struct net2280_ep *ep) -{ - writel(readl(&ep->dma->dmactl) & ~(1 << DMA_ENABLE), &ep->dma->dmactl); - spin_stop_dma(ep->dma); - - ep->dma_started = false; -} - static inline void queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid) { @@ -990,23 +874,8 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) /* kickstart this i/o queue? 
*/ if (list_empty (&ep->queue) && !ep->stopped) { - /* DMA request while EP halted */ - if (ep->dma && - (readl(&ep->regs->ep_rsp) & (1 << CLEAR_ENDPOINT_HALT)) && - (dev->pdev->vendor == 0x10b5)) { - int valid = 1; - if (ep->is_in) { - int expect; - expect = likely(req->req.zero || - ((req->req.length % - ep->ep.maxpacket) != 0)); - if (expect != ep->in_fifo_validate) - valid = 0; - } - queue_dma(ep, req, valid); - } /* use DMA if the endpoint supports it, else pio */ - else if (ep->dma) + if (ep->dma) start_dma (ep, req); else { /* maybe there's no control data, just status ack */ @@ -1124,8 +993,6 @@ static void scan_dma_completions (struct net2280_ep *ep) } else if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) { tmp = readl (&ep->regs->ep_stat); - if (ep->dev->pdev->vendor == 0x10b5) - return dma_done(ep, req, tmp, 0); /* AVOID TROUBLE HERE by not issuing short reads from * your gadget driver. That helps avoids errata 0121, @@ -1212,7 +1079,7 @@ static void restart_dma (struct net2280_ep *ep) start_queue (ep, dmactl, req->td_dma); } -static void abort_dma_228x(struct net2280_ep *ep) +static void abort_dma (struct net2280_ep *ep) { /* abort the current transfer */ if (likely (!list_empty (&ep->queue))) { @@ -1224,19 +1091,6 @@ static void abort_dma_228x(struct net2280_ep *ep) scan_dma_completions (ep); } -static void abort_dma_338x(struct net2280_ep *ep) -{ - writel((1 << DMA_ABORT), &ep->dma->dmastat); - spin_stop_dma(ep->dma); -} - -static void abort_dma(struct net2280_ep *ep) -{ - if (ep->dev->pdev->vendor == 0x17cc) - return abort_dma_228x(ep); - return abort_dma_338x(ep); -} - /* dequeue ALL requests */ static void nuke (struct net2280_ep *ep) { @@ -1390,9 +1244,6 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) ep->wedged = 1; } else { clear_halt (ep); - if (ep->dev->pdev->vendor == 0x10b5 && - !list_empty(&ep->queue) && ep->td_dma) - restart_dma(ep); ep->wedged = 0; } (void) readl (&ep->regs->ep_rsp); @@ -1516,13 +1367,10 @@ static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value) spin_lock_irqsave (&dev->lock, flags); tmp = readl (&dev->usb->usbctl); - if (value) { + if (value) tmp |= (1 << SELF_POWERED_STATUS); - dev->selfpowered = 1; - } else { + else tmp &= ~(1 << SELF_POWERED_STATUS); - dev->selfpowered = 0; - } writel (tmp, &dev->usb->usbctl); spin_unlock_irqrestore (&dev->lock, flags); @@ -1656,14 +1504,14 @@ static ssize_t registers_show(struct device *_dev, /* DMA Control Registers */ /* Configurable EP Control Registers */ - for (i = 0; i < dev->n_ep; i++) { + for (i = 0; i < 7; i++) { struct net2280_ep *ep; ep = &dev->ep [i]; if (i && !ep->desc) continue; - t1 = readl(&ep->cfg->ep_cfg); + t1 = readl (&ep->regs->ep_cfg); t2 = readl (&ep->regs->ep_rsp) & 0xff; t = scnprintf (next, size, "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s" @@ -1723,7 +1571,7 @@ static ssize_t registers_show(struct device *_dev, t = scnprintf (next, size, "\nirqs: "); size -= t; next += t; - for (i = 0; i < dev->n_ep; i++) { + for (i = 0; i < 7; i++) { struct net2280_ep *ep; ep = &dev->ep [i]; @@ -1758,7 +1606,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, size = PAGE_SIZE; spin_lock_irqsave (&dev->lock, flags); - for (i = 0; i < dev->n_ep; i++) { + for (i = 0; i < 7; i++) { struct net2280_ep *ep = &dev->ep [i]; struct net2280_request *req; int t; @@ -1887,121 +1735,6 @@ static void set_fifo_mode (struct net2280 *dev, int mode) list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list); } -static void 
defect7374_disable_data_eps(struct net2280 *dev) -{ - /* - * For Defect 7374, disable data EPs (and more): - * - This phase undoes the earlier phase of the Defect 7374 workaround, - * returing ep regs back to normal. - */ - struct net2280_ep *ep; - int i; - unsigned char ep_sel; - u32 tmp_reg; - - for (i = 1; i < 5; i++) { - ep = &dev->ep[i]; - writel(0, &ep->cfg->ep_cfg); - } - - /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */ - for (i = 0; i < 6; i++) - writel(0, &dev->dep[i].dep_cfg); - - for (ep_sel = 0; ep_sel <= 21; ep_sel++) { - /* Select an endpoint for subsequent operations: */ - tmp_reg = readl(&dev->plregs->pl_ep_ctrl); - writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl); - - if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) || - ep_sel == 18 || ep_sel == 20) - continue; - - /* Change settings on some selected endpoints */ - tmp_reg = readl(&dev->plregs->pl_ep_cfg_4); - tmp_reg &= ~(1 << NON_CTRL_IN_TOLERATE_BAD_DIR); - writel(tmp_reg, &dev->plregs->pl_ep_cfg_4); - tmp_reg = readl(&dev->plregs->pl_ep_ctrl); - tmp_reg |= (1 << EP_INITIALIZED); - writel(tmp_reg, &dev->plregs->pl_ep_ctrl); - } -} - -static void defect7374_enable_data_eps_zero(struct net2280 *dev) -{ - u32 tmp = 0, tmp_reg; - u32 fsmvalue, scratch; - int i; - unsigned char ep_sel; - - scratch = get_idx_reg(dev->regs, SCRATCH); - fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); - scratch &= ~(0xf << DEFECT7374_FSM_FIELD); - - /*See if firmware needs to set up for workaround*/ - if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { - WARNING(dev, "Operate Defect 7374 workaround soft this time"); - WARNING(dev, "It will operate on cold-reboot and SS connect"); - - /*GPEPs:*/ - tmp = ((0 << ENDPOINT_NUMBER) | (1 << ENDPOINT_DIRECTION) | - (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) | - ((dev->enhanced_mode) ? - 1 << OUT_ENDPOINT_ENABLE : 1 << ENDPOINT_ENABLE) | - (1 << IN_ENDPOINT_ENABLE)); - - for (i = 1; i < 5; i++) - writel(tmp, &dev->ep[i].cfg->ep_cfg); - - /* CSRIN, PCIIN, STATIN, RCIN*/ - tmp = ((0 << ENDPOINT_NUMBER) | (1 << ENDPOINT_ENABLE)); - writel(tmp, &dev->dep[1].dep_cfg); - writel(tmp, &dev->dep[3].dep_cfg); - writel(tmp, &dev->dep[4].dep_cfg); - writel(tmp, &dev->dep[5].dep_cfg); - - /*Implemented for development and debug. 
- * Can be refined/tuned later.*/ - for (ep_sel = 0; ep_sel <= 21; ep_sel++) { - /* Select an endpoint for subsequent operations: */ - tmp_reg = readl(&dev->plregs->pl_ep_ctrl); - writel(((tmp_reg & ~0x1f) | ep_sel), - &dev->plregs->pl_ep_ctrl); - - if (ep_sel == 1) { - tmp = - (readl(&dev->plregs->pl_ep_ctrl) | - (1 << CLEAR_ACK_ERROR_CODE) | 0); - writel(tmp, &dev->plregs->pl_ep_ctrl); - continue; - } - - if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) || - ep_sel == 18 || ep_sel == 20) - continue; - - tmp = (readl(&dev->plregs->pl_ep_cfg_4) | - (1 << NON_CTRL_IN_TOLERATE_BAD_DIR) | 0); - writel(tmp, &dev->plregs->pl_ep_cfg_4); - - tmp = readl(&dev->plregs->pl_ep_ctrl) & - ~(1 << EP_INITIALIZED); - writel(tmp, &dev->plregs->pl_ep_ctrl); - - } - - /* Set FSM to focus on the first Control Read: - * - Tip: Connection speed is known upon the first - * setup request.*/ - scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ; - set_idx_reg(dev->regs, SCRATCH, scratch); - - } else{ - WARNING(dev, "Defect 7374 workaround soft will NOT operate"); - WARNING(dev, "It will operate on cold-reboot and SS connect"); - } -} - /* keeping it simple: * - one bus driver, initted first; * - one function driver, initted second @@ -2011,7 +1744,7 @@ static void defect7374_enable_data_eps_zero(struct net2280 *dev) * perhaps to bind specific drivers to specific devices. */ -static void usb_reset_228x(struct net2280 *dev) +static void usb_reset (struct net2280 *dev) { u32 tmp; @@ -2027,11 +1760,11 @@ static void usb_reset_228x(struct net2280 *dev) /* clear old dma and irq state */ for (tmp = 0; tmp < 4; tmp++) { - struct net2280_ep *ep = &dev->ep[tmp + 1]; + struct net2280_ep *ep = &dev->ep [tmp + 1]; + if (ep->dma) - abort_dma(ep); + abort_dma (ep); } - writel (~0, &dev->regs->irqstat0), writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), @@ -2047,67 +1780,7 @@ static void usb_reset_228x(struct net2280 *dev) set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0); } -static void usb_reset_338x(struct net2280 *dev) -{ - u32 tmp; - u32 fsmvalue; - - dev->gadget.speed = USB_SPEED_UNKNOWN; - (void)readl(&dev->usb->usbctl); - - net2280_led_init(dev); - - fsmvalue = get_idx_reg(dev->regs, SCRATCH) & - (0xf << DEFECT7374_FSM_FIELD); - - /* See if firmware needs to set up for workaround: */ - if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) { - INFO(dev, "%s: Defect 7374 FsmValue 0x%08X\n", __func__, - fsmvalue); - } else { - /* disable automatic responses, and irqs */ - writel(0, &dev->usb->stdrsp); - writel(0, &dev->regs->pciirqenb0); - writel(0, &dev->regs->pciirqenb1); - } - - /* clear old dma and irq state */ - for (tmp = 0; tmp < 4; tmp++) { - struct net2280_ep *ep = &dev->ep[tmp + 1]; - - if (ep->dma) - abort_dma(ep); - } - - writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1); - - if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) { - /* reset, and enable pci */ - tmp = readl(&dev->regs->devinit) | - (1 << PCI_ENABLE) | - (1 << FIFO_SOFT_RESET) | - (1 << USB_SOFT_RESET) | - (1 << M8051_RESET); - - writel(tmp, &dev->regs->devinit); - } - - /* always ep-{1,2,3,4} ... 
maybe not ep-3 or ep-4 */ - INIT_LIST_HEAD(&dev->gadget.ep_list); - - for (tmp = 1; tmp < dev->n_ep; tmp++) - list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); - -} - -static void usb_reset(struct net2280 *dev) -{ - if (dev->pdev->vendor == 0x17cc) - return usb_reset_228x(dev); - return usb_reset_338x(dev); -} - -static void usb_reinit_228x(struct net2280 *dev) +static void usb_reinit (struct net2280 *dev) { u32 tmp; int init_dma; @@ -2130,8 +1803,7 @@ static void usb_reinit_228x(struct net2280 *dev) } else ep->fifo_size = 64; ep->regs = &dev->epregs [tmp]; - ep->cfg = &dev->epregs[tmp]; - ep_reset_228x(dev->regs, ep); + ep_reset (dev->regs, ep); } usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64); usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64); @@ -2148,122 +1820,7 @@ static void usb_reinit_228x(struct net2280 *dev) writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg); } -static void usb_reinit_338x(struct net2280 *dev) -{ - int init_dma; - int i; - u32 tmp, val; - u32 fsmvalue; - static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 }; - static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00, - 0x00, 0xC0, 0x00, 0xC0 }; - - /* use_dma changes are ignored till next device re-init */ - init_dma = use_dma; - - /* basic endpoint init */ - for (i = 0; i < dev->n_ep; i++) { - struct net2280_ep *ep = &dev->ep[i]; - - ep->ep.name = ep_name[i]; - ep->dev = dev; - ep->num = i; - - if (i > 0 && i <= 4 && init_dma) - ep->dma = &dev->dma[i - 1]; - - if (dev->enhanced_mode) { - ep->cfg = &dev->epregs[ne[i]]; - ep->regs = (struct net2280_ep_regs __iomem *) - (((void *)&dev->epregs[ne[i]]) + - ep_reg_addr[i]); - ep->fiforegs = &dev->fiforegs[i]; - } else { - ep->cfg = &dev->epregs[i]; - ep->regs = &dev->epregs[i]; - ep->fiforegs = &dev->fiforegs[i]; - } - - ep->fifo_size = (i != 0) ? 2048 : 512; - - ep_reset_338x(dev->regs, ep); - } - usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); - - dev->gadget.ep0 = &dev->ep[0].ep; - dev->ep[0].stopped = 0; - - /* Link layer set up */ - fsmvalue = get_idx_reg(dev->regs, SCRATCH) & - (0xf << DEFECT7374_FSM_FIELD); - - /* See if driver needs to set up for workaround: */ - if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) - INFO(dev, "%s: Defect 7374 FsmValue %08x\n", - __func__, fsmvalue); - else { - tmp = readl(&dev->usb_ext->usbctl2) & - ~((1 << U1_ENABLE) | (1 << U2_ENABLE) | (1 << LTM_ENABLE)); - writel(tmp, &dev->usb_ext->usbctl2); - } - - /* Hardware Defect and Workaround */ - val = readl(&dev->ll_lfps_regs->ll_lfps_5); - val &= ~(0xf << TIMER_LFPS_6US); - val |= 0x5 << TIMER_LFPS_6US; - writel(val, &dev->ll_lfps_regs->ll_lfps_5); - - val = readl(&dev->ll_lfps_regs->ll_lfps_6); - val &= ~(0xffff << TIMER_LFPS_80US); - val |= 0x0100 << TIMER_LFPS_80US; - writel(val, &dev->ll_lfps_regs->ll_lfps_6); - - /* - * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB - * Hot Reset Exit Handshake may Fail in Specific Case using - * Default Register Settings. Workaround for Enumeration test. - */ - val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); - val &= ~(0x1f << HOT_TX_NORESET_TS2); - val |= 0x10 << HOT_TX_NORESET_TS2; - writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); - - val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); - val &= ~(0x1f << HOT_RX_RESET_TS2); - val |= 0x3 << HOT_RX_RESET_TS2; - writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); - - /* - * Set Recovery Idle to Recover bit: - * - On SS connections, setting Recovery Idle to Recover Fmw improves - * link robustness with various hosts and hubs. 
- * - It is safe to set for all connection speeds; all chip revisions. - * - R-M-W to leave other bits undisturbed. - * - Reference PLX TT-7372 - */ - val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); - val |= (1 << RECOVERY_IDLE_TO_RECOVER_FMW); - writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); - - INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); - - /* disable dedicated endpoints */ - writel(0x0D, &dev->dep[0].dep_cfg); - writel(0x0D, &dev->dep[1].dep_cfg); - writel(0x0E, &dev->dep[2].dep_cfg); - writel(0x0E, &dev->dep[3].dep_cfg); - writel(0x0F, &dev->dep[4].dep_cfg); - writel(0x0C, &dev->dep[5].dep_cfg); -} - -static void usb_reinit(struct net2280 *dev) -{ - if (dev->pdev->vendor == 0x17cc) - return usb_reinit_228x(dev); - return usb_reinit_338x(dev); -} - -static void ep0_start_228x(struct net2280 *dev) +static void ep0_start (struct net2280 *dev) { writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE) | (1 << CLEAR_NAK_OUT_PACKETS) @@ -2306,61 +1863,6 @@ static void ep0_start_228x(struct net2280 *dev) (void) readl (&dev->usb->usbctl); } -static void ep0_start_338x(struct net2280 *dev) -{ - u32 fsmvalue; - - fsmvalue = get_idx_reg(dev->regs, SCRATCH) & - (0xf << DEFECT7374_FSM_FIELD); - - if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) - INFO(dev, "%s: Defect 7374 FsmValue %08x\n", __func__, - fsmvalue); - else - writel((1 << CLEAR_NAK_OUT_PACKETS_MODE) | - (1 << SET_EP_HIDE_STATUS_PHASE), - &dev->epregs[0].ep_rsp); - - /* - * hardware optionally handles a bunch of standard requests - * that the API hides from drivers anyway. have it do so. - * endpoint status/features are handled in software, to - * help pass tests for some dubious behavior. - */ - writel((1 << SET_ISOCHRONOUS_DELAY) | - (1 << SET_SEL) | - (1 << SET_TEST_MODE) | - (1 << SET_ADDRESS) | - (1 << GET_INTERFACE_STATUS) | - (1 << GET_DEVICE_STATUS), - &dev->usb->stdrsp); - dev->wakeup_enable = 1; - writel((1 << USB_ROOT_PORT_WAKEUP_ENABLE) | - (dev->softconnect << USB_DETECT_ENABLE) | - (1 << DEVICE_REMOTE_WAKEUP_ENABLE), - &dev->usb->usbctl); - - /* enable irqs so we can see ep0 and general operation */ - writel((1 << SETUP_PACKET_INTERRUPT_ENABLE) | - (1 << ENDPOINT_0_INTERRUPT_ENABLE) - , &dev->regs->pciirqenb0); - writel((1 << PCI_INTERRUPT_ENABLE) | - (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | - (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) | - (1 << VBUS_INTERRUPT_ENABLE), - &dev->regs->pciirqenb1); - - /* don't leave any writes posted */ - (void)readl(&dev->usb->usbctl); -} - -static void ep0_start(struct net2280 *dev) -{ - if (dev->pdev->vendor == 0x17cc) - return ep0_start_228x(dev); - return ep0_start_338x(dev); -} - /* when a driver is successfully registered, it will receive * control requests including set_configuration(), which enables * non-control requests. then usb traffic follows until a @@ -2384,7 +1886,7 @@ static int net2280_start(struct usb_gadget *_gadget, dev = container_of (_gadget, struct net2280, gadget); - for (i = 0; i < dev->n_ep; i++) + for (i = 0; i < 7; i++) dev->ep [i].irqs = 0; /* hook up the driver ... */ @@ -2398,17 +1900,13 @@ static int net2280_start(struct usb_gadget *_gadget, if (retval) goto err_func; /* Enable force-full-speed testing mode, if desired */ - if (full_speed && dev->pdev->vendor == 0x17cc) + if (full_speed) writel(1 << FORCE_FULL_SPEED_MODE, &dev->usb->xcvrdiag); /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. 
*/ net2280_led_active (dev, 1); - - if (dev->pdev->vendor == 0x10b5) - defect7374_enable_data_eps_zero(dev); - ep0_start (dev); DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n", @@ -2439,7 +1937,7 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) * and kill any outstanding requests. */ usb_reset (dev); - for (i = 0; i < dev->n_ep; i++) + for (i = 0; i < 7; i++) nuke (&dev->ep [i]); /* report disconnect; the driver is already quiesced */ @@ -2469,8 +1967,7 @@ static int net2280_stop(struct usb_gadget *_gadget, net2280_led_active (dev, 0); /* Disable full-speed test mode */ - if (dev->pdev->vendor == 0x17cc) - writel(0, &dev->usb->xcvrdiag); + writel(0, &dev->usb->xcvrdiag); device_remove_file (&dev->pdev->dev, &dev_attr_function); device_remove_file (&dev->pdev->dev, &dev_attr_queues); @@ -2722,350 +2219,6 @@ get_ep_by_addr (struct net2280 *dev, u16 wIndex) return NULL; } -static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r) -{ - u32 scratch, fsmvalue; - u32 ack_wait_timeout, state; - - /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */ - scratch = get_idx_reg(dev->regs, SCRATCH); - fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD); - scratch &= ~(0xf << DEFECT7374_FSM_FIELD); - - if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) && - (r.bRequestType & USB_DIR_IN))) - return; - - /* This is the first Control Read for this connection: */ - if (!(readl(&dev->usb->usbstat) & (1 << SUPER_SPEED_MODE))) { - /* - * Connection is NOT SS: - * - Connection must be FS or HS. - * - This FSM state should allow workaround software to - * run after the next USB connection. - */ - scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ; - goto restore_data_eps; - } - - /* Connection is SS: */ - for (ack_wait_timeout = 0; - ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS; - ack_wait_timeout++) { - - state = readl(&dev->plregs->pl_ep_status_1) - & (0xff << STATE); - if ((state >= (ACK_GOOD_NORMAL << STATE)) && - (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) { - scratch |= DEFECT7374_FSM_SS_CONTROL_READ; - break; - } - - /* - * We have not yet received host's Data Phase ACK - * - Wait and try again. - */ - udelay(DEFECT_7374_PROCESSOR_WAIT_TIME); - - continue; - } - - - if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) { - ERROR(dev, "FAIL: Defect 7374 workaround waited but failed"); - ERROR(dev, "to detect SS host's data phase ACK."); - ERROR(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"); - ERROR(dev, "got 0x%2.2x.\n", state >> STATE); - } else { - WARNING(dev, "INFO: Defect 7374 workaround waited about\n"); - WARNING(dev, "%duSec for Control Read Data Phase ACK\n", - DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout); - } - -restore_data_eps: - /* - * Restore data EPs to their pre-workaround settings (disabled, - * initialized, and other details). - */ - defect7374_disable_data_eps(dev); - - set_idx_reg(dev->regs, SCRATCH, scratch); - - return; -} - -static void ep_stall(struct net2280_ep *ep, int stall) -{ - struct net2280 *dev = ep->dev; - u32 val; - static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 }; - - if (stall) { - writel((1 << SET_ENDPOINT_HALT) | - /* (1 << SET_NAK_PACKETS) | */ - (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), - &ep->regs->ep_rsp); - ep->is_halt = 1; - } else { - if (dev->gadget.speed == USB_SPEED_SUPER) { - /* - * Workaround for SS SeqNum not cleared via - * Endpoint Halt (Clear) bit. 
select endpoint - */ - val = readl(&dev->plregs->pl_ep_ctrl); - val = (val & ~0x1f) | ep_pl[ep->num]; - writel(val, &dev->plregs->pl_ep_ctrl); - - val |= (1 << SEQUENCE_NUMBER_RESET); - writel(val, &dev->plregs->pl_ep_ctrl); - } - val = readl(&ep->regs->ep_rsp); - val |= (1 << CLEAR_ENDPOINT_HALT) | - (1 << CLEAR_ENDPOINT_TOGGLE); - writel(val - /* | (1 << CLEAR_NAK_PACKETS)*/ - , &ep->regs->ep_rsp); - ep->is_halt = 0; - val = readl(&ep->regs->ep_rsp); - } -} - -static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged) -{ - /* set/clear, then synch memory views with the device */ - if (value) { - ep->stopped = 1; - if (ep->num == 0) - ep->dev->protocol_stall = 1; - else { - if (ep->dma) - ep_stop_dma(ep); - ep_stall(ep, true); - } - - if (wedged) - ep->wedged = 1; - } else { - ep->stopped = 0; - ep->wedged = 0; - - ep_stall(ep, false); - - /* Flush the queue */ - if (!list_empty(&ep->queue)) { - struct net2280_request *req = - list_entry(ep->queue.next, struct net2280_request, - queue); - if (ep->dma) - resume_dma(ep); - else { - if (ep->is_in) - write_fifo(ep, &req->req); - else { - if (read_fifo(ep, req)) - done(ep, req, 0); - } - } - } - } -} - -static void handle_stat0_irqs_superspeed(struct net2280 *dev, - struct net2280_ep *ep, struct usb_ctrlrequest r) -{ - int tmp = 0; - -#define w_value le16_to_cpu(r.wValue) -#define w_index le16_to_cpu(r.wIndex) -#define w_length le16_to_cpu(r.wLength) - - switch (r.bRequest) { - struct net2280_ep *e; - u16 status; - - case USB_REQ_SET_CONFIGURATION: - dev->addressed_state = !w_value; - goto usb3_delegate; - - case USB_REQ_GET_STATUS: - switch (r.bRequestType) { - case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE): - status = dev->wakeup_enable ? 0x02 : 0x00; - if (dev->selfpowered) - status |= 1 << 0; - status |= (dev->u1_enable << 2 | dev->u2_enable << 3 | - dev->ltm_enable << 4); - writel(0, &dev->epregs[0].ep_irqenb); - set_fifo_bytecount(ep, sizeof(status)); - writel((__force u32) status, &dev->epregs[0].ep_data); - allow_status_338x(ep); - break; - - case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): - e = get_ep_by_addr(dev, w_index); - if (!e) - goto do_stall3; - status = readl(&e->regs->ep_rsp) & - (1 << CLEAR_ENDPOINT_HALT); - writel(0, &dev->epregs[0].ep_irqenb); - set_fifo_bytecount(ep, sizeof(status)); - writel((__force u32) status, &dev->epregs[0].ep_data); - allow_status_338x(ep); - break; - - default: - goto usb3_delegate; - } - break; - - case USB_REQ_CLEAR_FEATURE: - switch (r.bRequestType) { - case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): - if (!dev->addressed_state) { - switch (w_value) { - case USB_DEVICE_U1_ENABLE: - dev->u1_enable = 0; - writel(readl(&dev->usb_ext->usbctl2) & - ~(1 << U1_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - - case USB_DEVICE_U2_ENABLE: - dev->u2_enable = 0; - writel(readl(&dev->usb_ext->usbctl2) & - ~(1 << U2_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - - case USB_DEVICE_LTM_ENABLE: - dev->ltm_enable = 0; - writel(readl(&dev->usb_ext->usbctl2) & - ~(1 << LTM_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - - default: - break; - } - } - if (w_value == USB_DEVICE_REMOTE_WAKEUP) { - dev->wakeup_enable = 0; - writel(readl(&dev->usb->usbctl) & - ~(1 << DEVICE_REMOTE_WAKEUP_ENABLE), - &dev->usb->usbctl); - allow_status_338x(ep); - break; - } - goto usb3_delegate; - - case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): - e = 
get_ep_by_addr(dev, w_index); - if (!e) - goto do_stall3; - if (w_value != USB_ENDPOINT_HALT) - goto do_stall3; - VDEBUG(dev, "%s clear halt\n", e->ep.name); - ep_stall(e, false); - if (!list_empty(&e->queue) && e->td_dma) - restart_dma(e); - allow_status(ep); - ep->stopped = 1; - break; - - default: - goto usb3_delegate; - } - break; - case USB_REQ_SET_FEATURE: - switch (r.bRequestType) { - case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE): - if (!dev->addressed_state) { - switch (w_value) { - case USB_DEVICE_U1_ENABLE: - dev->u1_enable = 1; - writel(readl(&dev->usb_ext->usbctl2) | - (1 << U1_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - - case USB_DEVICE_U2_ENABLE: - dev->u2_enable = 1; - writel(readl(&dev->usb_ext->usbctl2) | - (1 << U2_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - - case USB_DEVICE_LTM_ENABLE: - dev->ltm_enable = 1; - writel(readl(&dev->usb_ext->usbctl2) | - (1 << LTM_ENABLE), - &dev->usb_ext->usbctl2); - allow_status_338x(ep); - goto next_endpoints3; - default: - break; - } - } - - if (w_value == USB_DEVICE_REMOTE_WAKEUP) { - dev->wakeup_enable = 1; - writel(readl(&dev->usb->usbctl) | - (1 << DEVICE_REMOTE_WAKEUP_ENABLE), - &dev->usb->usbctl); - allow_status_338x(ep); - break; - } - goto usb3_delegate; - - case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT): - e = get_ep_by_addr(dev, w_index); - if (!e || (w_value != USB_ENDPOINT_HALT)) - goto do_stall3; - ep_stdrsp(e, true, false); - allow_status_338x(ep); - break; - - default: - goto usb3_delegate; - } - - break; - default: - -usb3_delegate: - VDEBUG(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n", - r.bRequestType, r.bRequest, - w_value, w_index, w_length, - readl(&ep->cfg->ep_cfg)); - - ep->responded = 0; - spin_unlock(&dev->lock); - tmp = dev->driver->setup(&dev->gadget, &r); - spin_lock(&dev->lock); - } -do_stall3: - if (tmp < 0) { - VDEBUG(dev, "req %02x.%02x protocol STALL; stat %d\n", - r.bRequestType, r.bRequest, tmp); - dev->protocol_stall = 1; - /* TD 9.9 Halt Endpoint test. 
TD 9.22 Set feature test */ - ep_stall(ep, true); - } - -next_endpoints3: - -#undef w_value -#undef w_index -#undef w_length - - return; -} - static void handle_stat0_irqs (struct net2280 *dev, u32 stat) { struct net2280_ep *ep; @@ -3087,20 +2240,10 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) struct net2280_request *req; if (dev->gadget.speed == USB_SPEED_UNKNOWN) { - u32 val = readl(&dev->usb->usbstat); - if (val & (1 << SUPER_SPEED)) { - dev->gadget.speed = USB_SPEED_SUPER; - usb_ep_set_maxpacket_limit(&dev->ep[0].ep, - EP0_SS_MAX_PACKET_SIZE); - } else if (val & (1 << HIGH_SPEED)) { + if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED)) dev->gadget.speed = USB_SPEED_HIGH; - usb_ep_set_maxpacket_limit(&dev->ep[0].ep, - EP0_HS_MAX_PACKET_SIZE); - } else { + else dev->gadget.speed = USB_SPEED_FULL; - usb_ep_set_maxpacket_limit(&dev->ep[0].ep, - EP0_HS_MAX_PACKET_SIZE); - } net2280_led_speed (dev, dev->gadget.speed); DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed)); } @@ -3118,38 +2261,32 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) } ep->stopped = 0; dev->protocol_stall = 0; - if (dev->pdev->vendor == 0x10b5) - ep->is_halt = 0; - else{ - if (ep->dev->pdev->device == 0x2280) - tmp = (1 << FIFO_OVERFLOW) | - (1 << FIFO_UNDERFLOW); - else - tmp = 0; - - writel(tmp | (1 << TIMEOUT) | - (1 << USB_STALL_SENT) | - (1 << USB_IN_NAK_SENT) | - (1 << USB_IN_ACK_RCVD) | - (1 << USB_OUT_PING_NAK_SENT) | - (1 << USB_OUT_ACK_SENT) | - (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | - (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | - (1 << DATA_PACKET_RECEIVED_INTERRUPT) | - (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | - (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | - (1 << DATA_IN_TOKEN_INTERRUPT) - , &ep->regs->ep_stat); - } - u.raw[0] = readl(&dev->usb->setup0123); - u.raw[1] = readl(&dev->usb->setup4567); + + if (ep->dev->pdev->device == 0x2280) + tmp = (1 << FIFO_OVERFLOW) + | (1 << FIFO_UNDERFLOW); + else + tmp = 0; + + writel (tmp | (1 << TIMEOUT) + | (1 << USB_STALL_SENT) + | (1 << USB_IN_NAK_SENT) + | (1 << USB_IN_ACK_RCVD) + | (1 << USB_OUT_PING_NAK_SENT) + | (1 << USB_OUT_ACK_SENT) + | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) + | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) + | (1 << DATA_PACKET_RECEIVED_INTERRUPT) + | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) + | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) + | (1 << DATA_IN_TOKEN_INTERRUPT) + , &ep->regs->ep_stat); + u.raw [0] = readl (&dev->usb->setup0123); + u.raw [1] = readl (&dev->usb->setup4567); cpu_to_le32s (&u.raw [0]); cpu_to_le32s (&u.raw [1]); - if (dev->pdev->vendor == 0x10b5) - defect7374_workaround(dev, u.r); - tmp = 0; #define w_value le16_to_cpu(u.r.wValue) @@ -3181,12 +2318,6 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) * everything else goes uplevel to the gadget code. 
*/ ep->responded = 1; - - if (dev->gadget.speed == USB_SPEED_SUPER) { - handle_stat0_irqs_superspeed(dev, ep, u.r); - goto next_endpoints; - } - switch (u.r.bRequest) { case USB_REQ_GET_STATUS: { struct net2280_ep *e; @@ -3229,11 +2360,8 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) VDEBUG(dev, "%s wedged, halt not cleared\n", ep->ep.name); } else { - VDEBUG(dev, "%s clear halt\n", e->ep.name); + VDEBUG(dev, "%s clear halt\n", ep->ep.name); clear_halt(e); - if (ep->dev->pdev->vendor == 0x10b5 && - !list_empty(&e->queue) && e->td_dma) - restart_dma(e); } allow_status (ep); goto next_endpoints; @@ -3253,8 +2381,6 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat) if (e->ep.name == ep0name) goto do_stall; set_halt (e); - if (dev->pdev->vendor == 0x10b5 && e->dma) - abort_dma(e); allow_status (ep); VDEBUG (dev, "%s set halt\n", ep->ep.name); goto next_endpoints; @@ -3266,7 +2392,7 @@ delegate: "ep_cfg %08x\n", u.r.bRequestType, u.r.bRequest, w_value, w_index, w_length, - readl(&ep->cfg->ep_cfg)); + readl (&ep->regs->ep_cfg)); ep->responded = 0; spin_unlock (&dev->lock); tmp = dev->driver->setup (&dev->gadget, &u.r); @@ -3329,7 +2455,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat) /* after disconnect there's nothing else to do! */ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); - mask = (1 << SUPER_SPEED) | (1 << HIGH_SPEED) | (1 << FULL_SPEED); + mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED); /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and @@ -3420,19 +2546,12 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat) tmp = readl (&dma->dmastat); writel (tmp, &dma->dmastat); - /* dma sync*/ - if (dev->pdev->vendor == 0x10b5) { - u32 r_dmacount = readl(&dma->dmacount); - if (!ep->is_in && (r_dmacount & 0x00FFFFFF) && - (tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))) - continue; - } - /* chaining should stop on abort, short OUT from fifo, * or (stat0 codepath) short OUT transfer. */ if (!use_dma_chaining) { - if (!(tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))) { + if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT)) + == 0) { DEBUG (ep->dev, "%s no xact done? 
%08x\n", ep->ep.name, tmp); continue; @@ -3506,8 +2625,7 @@ static irqreturn_t net2280_irq (int irq, void *_dev) struct net2280 *dev = _dev; /* shared interrupt, not ours */ - if (dev->pdev->vendor == 0x17cc && - (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))) + if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED))) return IRQ_NONE; spin_lock (&dev->lock); @@ -3518,13 +2636,6 @@ static irqreturn_t net2280_irq (int irq, void *_dev) /* control requests and PIO */ handle_stat0_irqs (dev, readl (&dev->regs->irqstat0)); - if (dev->pdev->vendor == 0x10b5) { - /* re-enable interrupt to trigger any possible new interrupt */ - u32 pciirqenb1 = readl(&dev->regs->pciirqenb1); - writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1); - writel(pciirqenb1, &dev->regs->pciirqenb1); - } - spin_unlock (&dev->lock); return IRQ_HANDLED; @@ -3563,8 +2674,6 @@ static void net2280_remove (struct pci_dev *pdev) } if (dev->got_irq) free_irq (pdev->irq, dev); - if (use_msi && dev->pdev->vendor == 0x10b5) - pci_disable_msi(pdev); if (dev->regs) iounmap (dev->regs); if (dev->region) @@ -3599,8 +2708,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init (&dev->lock); dev->pdev = pdev; dev->gadget.ops = &net2280_ops; - dev->gadget.max_speed = (dev->pdev->vendor == 0x10b5) ? - USB_SPEED_SUPER : USB_SPEED_HIGH; + dev->gadget.max_speed = USB_SPEED_HIGH; /* the "gadget" abstracts/virtualizes the controller */ dev->gadget.name = driver_name; @@ -3642,39 +2750,8 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); - if (dev->pdev->vendor == 0x10b5) { - u32 fsmvalue; - u32 usbstat; - dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *) - (base + 0x00b4); - dev->fiforegs = (struct usb338x_fifo_regs __iomem *) - (base + 0x0500); - dev->llregs = (struct usb338x_ll_regs __iomem *) - (base + 0x0700); - dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) - (base + 0x0748); - dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) - (base + 0x077c); - dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) - (base + 0x079c); - dev->plregs = (struct usb338x_pl_regs __iomem *) - (base + 0x0800); - usbstat = readl(&dev->usb->usbstat); - dev->enhanced_mode = (usbstat & (1 << 11)) ? 1 : 0; - dev->n_ep = (dev->enhanced_mode) ? 
9 : 5; - /* put into initial config, link up all endpoints */ - fsmvalue = get_idx_reg(dev->regs, SCRATCH) & - (0xf << DEFECT7374_FSM_FIELD); - /* See if firmware needs to set up for workaround: */ - if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) - writel(0, &dev->usb->usbctl); - } else{ - dev->enhanced_mode = 0; - dev->n_ep = 7; - /* put into initial config, link up all endpoints */ - writel(0, &dev->usb->usbctl); - } - + /* put into initial config, link up all endpoints */ + writel (0, &dev->usb->usbctl); usb_reset (dev); usb_reinit (dev); @@ -3685,10 +2762,6 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) goto done; } - if (use_msi && dev->pdev->vendor == 0x10b5) - if (pci_enable_msi(pdev)) - ERROR(dev, "Failed to enable MSI mode\n"); - if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev) != 0) { ERROR (dev, "request interrupt %d failed\n", pdev->irq); @@ -3724,8 +2797,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) } /* enable lower-overhead pci memory bursts during DMA */ - if (dev->pdev->vendor == 0x17cc) - writel((1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) + writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) // 256 write retries may not be enough... // | (1 << PCI_RETRY_ABORT_ENABLE) | (1 << DMA_READ_MULTIPLE_ENABLE) @@ -3742,10 +2814,10 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) INFO (dev, "%s\n", driver_desc); INFO (dev, "irq %d, pci mem %p, chip rev %04x\n", pdev->irq, base, dev->chiprev); - INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n", - use_dma ? (use_dma_chaining ? "chaining" : "enabled") - : "disabled", - dev->enhanced_mode ? "enhanced mode" : "legacy mode"); + INFO (dev, "version: " DRIVER_VERSION "; dma %s\n", + use_dma + ? (use_dma_chaining ? "chaining" : "enabled") + : "disabled"); retval = device_create_file (&pdev->dev, &dev_attr_registers); if (retval) goto done; @@ -3777,8 +2849,7 @@ static void net2280_shutdown (struct pci_dev *pdev) writel (0, &dev->usb->usbctl); /* Disable full-speed test mode */ - if (dev->pdev->vendor == 0x17cc) - writel(0, &dev->usb->xcvrdiag); + writel(0, &dev->usb->xcvrdiag); } @@ -3798,24 +2869,8 @@ static const struct pci_device_id pci_ids [] = { { .device = 0x2282, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, -}, - { - .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), - .class_mask = ~0, - .vendor = 0x10b5, - .device = 0x3380, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - { - .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), - .class_mask = ~0, - .vendor = 0x10b5, - .device = 0x3382, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, -{ /* end: all zeroes */ } + +}, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h index f32c2746b6ae..a844be0d683a 100644 --- a/drivers/usb/gadget/net2280.h +++ b/drivers/usb/gadget/net2280.h @@ -6,7 +6,6 @@ /* * Copyright (C) 2002 NetChip Technology, Inc. 
(http://www.netchip.com) * Copyright (C) 2003 David Brownell - * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,7 +14,6 @@ */ #include -#include /*-------------------------------------------------------------------------*/ @@ -61,13 +59,6 @@ set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value) #define CHIPREV_1 0x0100 #define CHIPREV_1A 0x0110 -/* DEFECT 7374 */ -#define DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS 200 -#define DEFECT_7374_PROCESSOR_WAIT_TIME 10 - -/* ep0 max packet size */ -#define EP0_SS_MAX_PACKET_SIZE 0x200 -#define EP0_HS_MAX_PACKET_SIZE 0x40 #ifdef __KERNEL__ /* ep a-f highspeed and fullspeed maxpacket, addresses @@ -94,15 +85,12 @@ struct net2280_dma { struct net2280_ep { struct usb_ep ep; - struct net2280_ep_regs __iomem *cfg; struct net2280_ep_regs __iomem *regs; struct net2280_dma_regs __iomem *dma; struct net2280_dma *dummy; - struct usb338x_fifo_regs __iomem *fiforegs; dma_addr_t td_dma; /* of dummy */ struct net2280 *dev; unsigned long irqs; - unsigned is_halt:1, dma_started:1; /* analogous to a host-side qh */ struct list_head queue; @@ -128,19 +116,10 @@ static inline void allow_status (struct net2280_ep *ep) ep->stopped = 1; } -static void allow_status_338x(struct net2280_ep *ep) +/* count (<= 4) bytes in the next fifo write will be valid */ +static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count) { - /* - * Control Status Phase Handshake was set by the chip when the setup - * packet arrived. While set, the chip automatically NAKs the host's - * Status Phase tokens. - */ - writel(1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE, &ep->regs->ep_rsp); - - ep->stopped = 1; - - /* TD 9.9 Halt Endpoint test. TD 9.22 set feature test. */ - ep->responded = 0; + writeb (count, 2 + (u8 __iomem *) &ep->regs->ep_cfg); } struct net2280_request { @@ -156,38 +135,23 @@ struct net2280 { /* each pci device provides one gadget, several endpoints */ struct usb_gadget gadget; spinlock_t lock; - struct net2280_ep ep[9]; + struct net2280_ep ep [7]; struct usb_gadget_driver *driver; unsigned enabled : 1, protocol_stall : 1, softconnect : 1, got_irq : 1, - region:1, - u1_enable:1, - u2_enable:1, - ltm_enable:1, - wakeup_enable:1, - selfpowered:1, - addressed_state:1; + region : 1; u16 chiprev; - int enhanced_mode; - int n_ep; /* pci state used to access those endpoints */ struct pci_dev *pdev; struct net2280_regs __iomem *regs; struct net2280_usb_regs __iomem *usb; - struct usb338x_usb_ext_regs __iomem *usb_ext; struct net2280_pci_regs __iomem *pci; struct net2280_dma_regs __iomem *dma; struct net2280_dep_regs __iomem *dep; struct net2280_ep_regs __iomem *epregs; - struct usb338x_fifo_regs __iomem *fiforegs; - struct usb338x_ll_regs __iomem *llregs; - struct usb338x_ll_lfps_regs __iomem *ll_lfps_regs; - struct usb338x_ll_tsn_regs __iomem *ll_tsn_regs; - struct usb338x_ll_chi_regs __iomem *ll_chicken_reg; - struct usb338x_pl_regs __iomem *plregs; struct pci_pool *requests; // statistics... @@ -215,43 +179,6 @@ static inline void clear_halt (struct net2280_ep *ep) , &ep->regs->ep_rsp); } -/* - * FSM value for Defect 7374 (U1U2 Test) is managed in - * chip's SCRATCH register: - */ -#define DEFECT7374_FSM_FIELD 28 - -/* Waiting for Control Read: - * - A transition to this state indicates a fresh USB connection, - * before the first Setup Packet. The connection speed is not - * known. 
Firmware is waiting for the first Control Read. - * - Starting state: This state can be thought of as the FSM's typical - * starting state. - * - Tip: Upon the first SS Control Read the FSM never - * returns to this state. - */ -#define DEFECT7374_FSM_WAITING_FOR_CONTROL_READ (1 << DEFECT7374_FSM_FIELD) - -/* Non-SS Control Read: - * - A transition to this state indicates detection of the first HS - * or FS Control Read. - * - Tip: Upon the first SS Control Read the FSM never - * returns to this state. - */ -#define DEFECT7374_FSM_NON_SS_CONTROL_READ (2 << DEFECT7374_FSM_FIELD) - -/* SS Control Read: - * - A transition to this state indicates detection of the - * first SS Control Read. - * - This state indicates workaround completion. Workarounds no longer - * need to be applied (as long as the chip remains powered up). - * - Tip: Once in this state the FSM state does not change (until - * the chip's power is lost and restored). - * - This can be thought of as the final state of the FSM; - * the FSM 'locks-up' in this state until the chip loses power. - */ -#define DEFECT7374_FSM_SS_CONTROL_READ (3 << DEFECT7374_FSM_FIELD) - #ifdef USE_RDK_LEDS static inline void net2280_led_init (struct net2280 *dev) @@ -271,9 +198,6 @@ void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed) { u32 val = readl (&dev->regs->gpioctl); switch (speed) { - case USB_SPEED_SUPER: /* green + red */ - val |= (1 << GPIO0_DATA) | (1 << GPIO1_DATA); - break; case USB_SPEED_HIGH: /* green */ val &= ~(1 << GPIO0_DATA); val |= (1 << GPIO1_DATA); @@ -347,17 +271,6 @@ static inline void net2280_led_shutdown (struct net2280 *dev) /*-------------------------------------------------------------------------*/ -static inline void set_fifo_bytecount(struct net2280_ep *ep, unsigned count) -{ - if (ep->dev->pdev->vendor == 0x17cc) - writeb(count, 2 + (u8 __iomem *) &ep->regs->ep_cfg); - else{ - u32 tmp = readl(&ep->cfg->ep_cfg) & - (~(0x07 << EP_FIFO_BYTE_COUNT)); - writel(tmp | (count << EP_FIFO_BYTE_COUNT), &ep->cfg->ep_cfg); - } -} - static inline void start_out_naking (struct net2280_ep *ep) { /* NOTE: hardware races lurk here, and PING protocol issues */ diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h deleted file mode 100644 index f92eb635b9d3..000000000000 --- a/include/linux/usb/usb338x.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * USB 338x super/high/full speed USB device controller. - * Unlike many such controllers, this one talks PCI. - * - * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) - * Copyright (C) 2003 David Brownell - * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#ifndef __LINUX_USB_USB338X_H -#define __LINUX_USB_USB338X_H - -#include - -/* - * Extra defined bits for net2280 registers - */ -#define SCRATCH 0x0b - -#define DEFECT7374_FSM_FIELD 28 -#define SUPER_SPEED 8 -#define DMA_REQUEST_OUTSTANDING 5 -#define DMA_PAUSE_DONE_INTERRUPT 26 -#define SET_ISOCHRONOUS_DELAY 24 -#define SET_SEL 22 -#define SUPER_SPEED_MODE 8 - -/*ep_cfg*/ -#define MAX_BURST_SIZE 24 -#define EP_FIFO_BYTE_COUNT 16 -#define IN_ENDPOINT_ENABLE 14 -#define IN_ENDPOINT_TYPE 12 -#define OUT_ENDPOINT_ENABLE 10 -#define OUT_ENDPOINT_TYPE 8 - -struct usb338x_usb_ext_regs { - u32 usbclass; -#define DEVICE_PROTOCOL 16 -#define DEVICE_SUB_CLASS 8 -#define DEVICE_CLASS 0 - u32 ss_sel; -#define U2_SYSTEM_EXIT_LATENCY 8 -#define U1_SYSTEM_EXIT_LATENCY 0 - u32 ss_del; -#define U2_DEVICE_EXIT_LATENCY 8 -#define U1_DEVICE_EXIT_LATENCY 0 - u32 usb2lpm; -#define USB_L1_LPM_HIRD 2 -#define USB_L1_LPM_REMOTE_WAKE 1 -#define USB_L1_LPM_SUPPORT 0 - u32 usb3belt; -#define BELT_MULTIPLIER 10 -#define BEST_EFFORT_LATENCY_TOLERANCE 0 - u32 usbctl2; -#define LTM_ENABLE 7 -#define U2_ENABLE 6 -#define U1_ENABLE 5 -#define FUNCTION_SUSPEND 4 -#define USB3_CORE_ENABLE 3 -#define USB2_CORE_ENABLE 2 -#define SERIAL_NUMBER_STRING_ENABLE 0 - u32 in_timeout; -#define GPEP3_TIMEOUT 19 -#define GPEP2_TIMEOUT 18 -#define GPEP1_TIMEOUT 17 -#define GPEP0_TIMEOUT 16 -#define GPEP3_TIMEOUT_VALUE 13 -#define GPEP3_TIMEOUT_ENABLE 12 -#define GPEP2_TIMEOUT_VALUE 9 -#define GPEP2_TIMEOUT_ENABLE 8 -#define GPEP1_TIMEOUT_VALUE 5 -#define GPEP1_TIMEOUT_ENABLE 4 -#define GPEP0_TIMEOUT_VALUE 1 -#define GPEP0_TIMEOUT_ENABLE 0 - u32 isodelay; -#define ISOCHRONOUS_DELAY 0 -} __packed; - -struct usb338x_fifo_regs { - /* offset 0x0500, 0x0520, 0x0540, 0x0560, 0x0580 */ - u32 ep_fifo_size_base; -#define IN_FIFO_BASE_ADDRESS 22 -#define IN_FIFO_SIZE 16 -#define OUT_FIFO_BASE_ADDRESS 6 -#define OUT_FIFO_SIZE 0 - u32 ep_fifo_out_wrptr; - u32 ep_fifo_out_rdptr; - u32 ep_fifo_in_wrptr; - u32 ep_fifo_in_rdptr; - u32 unused[3]; -} __packed; - - -/* Link layer */ -struct usb338x_ll_regs { - /* offset 0x700 */ - u32 ll_ltssm_ctrl1; - u32 ll_ltssm_ctrl2; - u32 ll_ltssm_ctrl3; - u32 unused[2]; - u32 ll_general_ctrl0; - u32 ll_general_ctrl1; -#define PM_U3_AUTO_EXIT 29 -#define PM_U2_AUTO_EXIT 28 -#define PM_U1_AUTO_EXIT 27 -#define PM_FORCE_U2_ENTRY 26 -#define PM_FORCE_U1_ENTRY 25 -#define PM_LGO_COLLISION_SEND_LAU 24 -#define PM_DIR_LINK_REJECT 23 -#define PM_FORCE_LINK_ACCEPT 22 -#define PM_DIR_ENTRY_U3 20 -#define PM_DIR_ENTRY_U2 19 -#define PM_DIR_ENTRY_U1 18 -#define PM_U2_ENABLE 17 -#define PM_U1_ENABLE 16 -#define SKP_THRESHOLD_ADJUST_FMW 8 -#define RESEND_DPP_ON_LRTY_FMW 7 -#define DL_BIT_VALUE_FMW 6 -#define FORCE_DL_BIT 5 - u32 ll_general_ctrl2; -#define SELECT_INVERT_LANE_POLARITY 7 -#define FORCE_INVERT_LANE_POLARITY 6 - u32 ll_general_ctrl3; - u32 ll_general_ctrl4; - u32 ll_error_gen; -} __packed; - -struct usb338x_ll_lfps_regs { - /* offset 0x748 */ - u32 ll_lfps_5; -#define TIMER_LFPS_6US 16 - u32 ll_lfps_6; -#define TIMER_LFPS_80US 0 -} __packed; - -struct usb338x_ll_tsn_regs { - /* offset 0x77C */ - u32 ll_tsn_counters_2; -#define HOT_TX_NORESET_TS2 24 - u32 ll_tsn_counters_3; -#define HOT_RX_RESET_TS2 0 -} __packed; - -struct usb338x_ll_chi_regs { - /* offset 0x79C */ - u32 ll_tsn_chicken_bit; -#define RECOVERY_IDLE_TO_RECOVER_FMW 3 -} __packed; - -/* protocol layer */ -struct usb338x_pl_regs { - /* offset 0x800 */ - u32 pl_reg_1; - u32 pl_reg_2; - u32 pl_reg_3; - u32 pl_reg_4; - u32 pl_ep_ctrl; - /* Protocol 
Layer Endpoint Control*/ -#define PL_EP_CTRL 0x810 -#define ENDPOINT_SELECT 0 - /* [4:0] */ -#define EP_INITIALIZED 16 -#define SEQUENCE_NUMBER_RESET 17 -#define CLEAR_ACK_ERROR_CODE 20 - u32 pl_reg_6; - u32 pl_reg_7; - u32 pl_reg_8; - u32 pl_ep_status_1; - /* Protocol Layer Endpoint Status 1*/ -#define PL_EP_STATUS_1 0x820 -#define STATE 16 -#define ACK_GOOD_NORMAL 0x11 -#define ACK_GOOD_MORE_ACKS_TO_COME 0x16 - u32 pl_ep_status_2; - u32 pl_ep_status_3; - /* Protocol Layer Endpoint Status 3*/ -#define PL_EP_STATUS_3 0x828 -#define SEQUENCE_NUMBER 0 - u32 pl_ep_status_4; - /* Protocol Layer Endpoint Status 4*/ -#define PL_EP_STATUS_4 0x82c - u32 pl_ep_cfg_4; - /* Protocol Layer Endpoint Configuration 4*/ -#define PL_EP_CFG_4 0x830 -#define NON_CTRL_IN_TOLERATE_BAD_DIR 6 -} __packed; - -#endif /* __LINUX_USB_USB338X_H */ -- cgit v1.2.3 From e0b0baadb7a4509bdcd5ba37d0be61e2c4bb0d48 Mon Sep 17 00:00:00 2001 From: Richard Genoud Date: Tue, 13 May 2014 20:20:44 +0200 Subject: tty/serial: at91: use mctrl_gpio helpers On sam9x5, dedicated CTS (and RTS) pins are unusable together with the LCDC, the EMAC, or the MMC because they share the same line. Moreover, the USART controller doesn't handle DTR/DSR/DCD/RI signals, so we have to control them via GPIO. This patch permits to use GPIOs to control the CTS/RTS/DTR/DSR/DCD/RI signals. Signed-off-by: Richard Genoud Acked-by: Greg Kroah-Hartman Acked-by: Nicolas Ferre Signed-off-by: Greg Kroah-Hartman --- .../devicetree/bindings/serial/atmel-usart.txt | 10 +- arch/arm/mach-at91/at91rm9200_devices.c | 16 ++-- arch/arm/mach-at91/at91sam9260_devices.c | 7 -- arch/arm/mach-at91/at91sam9261_devices.c | 4 - arch/arm/mach-at91/at91sam9263_devices.c | 4 - arch/arm/mach-at91/at91sam9g45_devices.c | 5 - arch/arm/mach-at91/at91sam9rl_devices.c | 5 - drivers/tty/serial/Kconfig | 1 + drivers/tty/serial/atmel_serial.c | 105 ++++++++++++++------- include/linux/platform_data/atmel.h | 1 - 10 files changed, 89 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt index 2f7aad71b3c9..a6391e70a8fd 100644 --- a/Documentation/devicetree/bindings/serial/atmel-usart.txt +++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt @@ -13,8 +13,9 @@ Required properties: Optional properties: - atmel,use-dma-rx: use of PDC or DMA for receiving data - atmel,use-dma-tx: use of PDC or DMA for transmitting data -- rts-gpios: specify a GPIO for RTS line. It will use specified PIO instead of the peripheral - function pin for the USART RTS feature. If unsure, don't specify this property. +- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD line respectively. + It will use specified PIO instead of the peripheral function pin for the USART feature. + If unsure, don't specify this property. - add dma bindings for dma transfer: - dmas: DMA specifier, consisting of a phandle to DMA controller node, memory peripheral interface and USART DMA channel ID, FIFO configuration. 
@@ -36,6 +37,11 @@ Example: atmel,use-dma-rx; atmel,use-dma-tx; rts-gpios = <&pioD 15 GPIO_ACTIVE_LOW>; + cts-gpios = <&pioD 16 GPIO_ACTIVE_LOW>; + dtr-gpios = <&pioD 17 GPIO_ACTIVE_LOW>; + dsr-gpios = <&pioD 18 GPIO_ACTIVE_LOW>; + dcd-gpios = <&pioD 20 GPIO_ACTIVE_LOW>; + rng-gpios = <&pioD 19 GPIO_ACTIVE_LOW>; }; - use DMA: diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index f3f19f21352a..291a90a5b1d4 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -923,7 +924,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -962,7 +962,14 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, +}; + +static struct gpiod_lookup_table uart0_gpios_table = { + .dev_id = "atmel_usart", + .table = { + GPIO_LOOKUP("pioA", 21, "rts", GPIO_ACTIVE_LOW), + { }, + }, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -993,7 +1000,7 @@ static inline void configure_usart0_pins(unsigned pins) * We need to drive the pin manually. The serial driver will driver * this to high when initializing. */ - uart0_data.rts_gpio = AT91_PIN_PA21; + gpiod_add_lookup_table(&uart0_gpios_table); } } @@ -1013,7 +1020,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -1065,7 +1071,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); @@ -1109,7 +1114,6 @@ static struct resource uart3_resources[] = { static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index a0282928e9c1..526453ecdaff 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -820,7 +820,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -859,7 +858,6 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -911,7 +909,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -955,7 +952,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); @@ -999,7 +995,6 @@ static struct resource uart3_resources[] = { static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); @@ -1043,7 +1038,6 @@ static struct 
resource uart4_resources[] = { static struct atmel_uart_data uart4_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart4_dmamask = DMA_BIT_MASK(32); @@ -1082,7 +1076,6 @@ static struct resource uart5_resources[] = { static struct atmel_uart_data uart5_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart5_dmamask = DMA_BIT_MASK(32); diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 80e35895d28f..b5f7a7226ff8 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -881,7 +881,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -920,7 +919,6 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -964,7 +962,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -1008,7 +1005,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index 43d53d6156dd..39803c3296b2 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -1325,7 +1325,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -1364,7 +1363,6 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -1408,7 +1406,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -1452,7 +1449,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index dab362c06487..4dfedd3f2e15 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1588,7 +1588,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -1627,7 +1626,6 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -1671,7 +1669,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -1715,7 
+1712,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); @@ -1759,7 +1755,6 @@ static struct resource uart3_resources[] = { static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index 428fc412aaf1..f75985062913 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -957,7 +957,6 @@ static struct resource dbgu_resources[] = { static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ - .rts_gpio = -EINVAL, }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); @@ -996,7 +995,6 @@ static struct resource uart0_resources[] = { static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); @@ -1048,7 +1046,6 @@ static struct resource uart1_resources[] = { static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); @@ -1092,7 +1089,6 @@ static struct resource uart2_resources[] = { static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); @@ -1136,7 +1132,6 @@ static struct resource uart3_resources[] = { static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, - .rts_gpio = -EINVAL, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 4bf6d220357b..fb57159bad3a 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -117,6 +117,7 @@ config SERIAL_ATMEL bool "AT91 / AT32 on-chip serial port support" depends on ARCH_AT91 || AVR32 select SERIAL_CORE + select SERIAL_MCTRL_GPIO help This enables the driver for the on-chip UARTs of the Atmel AT91 and AT32 processors. 
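For context, a minimal consumer sketch of the serial_mctrl_gpio helpers that the new SERIAL_MCTRL_GPIO selection brings in; it mirrors the mctrl_gpio_* calls used by the atmel_serial.c hunks that follow. The example_* names are hypothetical and error handling is trimmed to the essentials:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"

struct example_uart_port {
	struct uart_port port;
	struct mctrl_gpios *gpios;	/* optional modem-signal GPIOs */
};

static int example_request_mctrl_gpios(struct example_uart_port *p,
					struct device *dev)
{
	/* Parses the {rts,cts,dtr,dsr,rng,dcd}-gpios properties, if present */
	p->gpios = mctrl_gpio_init(dev, 0);

	return IS_ERR_OR_NULL(p->gpios) ? -ENODEV : 0;
}

static void example_set_mctrl(struct example_uart_port *p, unsigned int mctrl)
{
	/* Drive the RTS/DTR/... lines that are routed through PIO pins */
	mctrl_gpio_set(p->gpios, mctrl);
}

static unsigned int example_get_mctrl(struct example_uart_port *p)
{
	unsigned int mctrl = 0;

	/* Merge CTS/DSR/RI/DCD state read back from the GPIOs */
	mctrl_gpio_get(p->gpios, &mctrl);

	return mctrl;
}

The Atmel driver itself keeps the USART's native RTS/CTS handling and only folds in the GPIO state for signals that actually have a dedicated gpiod, as the atmel_get_lines_status() hunk below shows.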
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 53eeea13ff16..43ca659c1d4b 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include @@ -57,6 +59,8 @@ #include +#include "serial_mctrl_gpio.h" + static void atmel_start_rx(struct uart_port *port); static void atmel_stop_rx(struct uart_port *port); @@ -162,7 +166,7 @@ struct atmel_uart_port { struct circ_buf rx_ring; struct serial_rs485 rs485; /* rs485 settings */ - int rts_gpio; /* optional RTS GPIO */ + struct mctrl_gpios *gpios; unsigned int tx_done_mask; bool is_usart; /* usart or uart */ struct timer_list uart_timer; /* uart timer */ @@ -237,6 +241,50 @@ static bool atmel_use_dma_rx(struct uart_port *port) return atmel_port->use_dma_rx; } +static unsigned int atmel_get_lines_status(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int status, ret = 0; + + status = UART_GET_CSR(port); + + mctrl_gpio_get(atmel_port->gpios, &ret); + + if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, + UART_GPIO_CTS))) { + if (ret & TIOCM_CTS) + status &= ~ATMEL_US_CTS; + else + status |= ATMEL_US_CTS; + } + + if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, + UART_GPIO_DSR))) { + if (ret & TIOCM_DSR) + status &= ~ATMEL_US_DSR; + else + status |= ATMEL_US_DSR; + } + + if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, + UART_GPIO_RI))) { + if (ret & TIOCM_RI) + status &= ~ATMEL_US_RI; + else + status |= ATMEL_US_RI; + } + + if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios, + UART_GPIO_DCD))) { + if (ret & TIOCM_CD) + status &= ~ATMEL_US_DCD; + else + status |= ATMEL_US_DCD; + } + + return status; +} + /* Enable or disable the rs485 support */ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) { @@ -296,17 +344,6 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) unsigned int mode; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - /* - * AT91RM9200 Errata #39: RTS0 is not internally connected - * to PA21. We need to drive the pin as a GPIO. - */ - if (gpio_is_valid(atmel_port->rts_gpio)) { - if (mctrl & TIOCM_RTS) - gpio_set_value(atmel_port->rts_gpio, 0); - else - gpio_set_value(atmel_port->rts_gpio, 1); - } - if (mctrl & TIOCM_RTS) control |= ATMEL_US_RTSEN; else @@ -319,6 +356,8 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) UART_PUT_CR(port, control); + mctrl_gpio_set(atmel_port->gpios, mctrl); + /* Local loopback mode? 
*/ mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE; if (mctrl & TIOCM_LOOP) @@ -346,7 +385,8 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) */ static u_int atmel_get_mctrl(struct uart_port *port) { - unsigned int status, ret = 0; + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned int ret = 0, status; status = UART_GET_CSR(port); @@ -362,7 +402,7 @@ static u_int atmel_get_mctrl(struct uart_port *port) if (!(status & ATMEL_US_RI)) ret |= TIOCM_RI; - return ret; + return mctrl_gpio_get(atmel_port->gpios, &ret); } /* @@ -1042,7 +1082,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) unsigned int status, pending, pass_counter = 0; do { - status = UART_GET_CSR(port); + status = atmel_get_lines_status(port); pending = status & UART_GET_IMR(port); if (!pending) break; @@ -1568,7 +1608,7 @@ static int atmel_startup(struct uart_port *port) } /* Save current CSR for comparison in atmel_tasklet_func() */ - atmel_port->irq_status_prev = UART_GET_CSR(port); + atmel_port->irq_status_prev = atmel_get_lines_status(port); atmel_port->irq_status = atmel_port->irq_status_prev; /* @@ -2324,6 +2364,15 @@ static int atmel_serial_resume(struct platform_device *pdev) #define atmel_serial_resume NULL #endif +static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev) +{ + p->gpios = mctrl_gpio_init(dev, 0); + if (IS_ERR_OR_NULL(p->gpios)) + return -1; + + return 0; +} + static int atmel_serial_probe(struct platform_device *pdev) { struct atmel_uart_port *port; @@ -2359,25 +2408,11 @@ static int atmel_serial_probe(struct platform_device *pdev) port = &atmel_ports[ret]; port->backup_imr = 0; port->uart.line = ret; - port->rts_gpio = -EINVAL; /* Invalid, zero could be valid */ - if (pdata) - port->rts_gpio = pdata->rts_gpio; - else if (np) - port->rts_gpio = of_get_named_gpio(np, "rts-gpios", 0); - - if (gpio_is_valid(port->rts_gpio)) { - ret = devm_gpio_request(&pdev->dev, port->rts_gpio, "RTS"); - if (ret) { - dev_err(&pdev->dev, "error requesting RTS GPIO\n"); - goto err; - } - /* Default to 1 as RTS is active low */ - ret = gpio_direction_output(port->rts_gpio, 1); - if (ret) { - dev_err(&pdev->dev, "error setting up RTS GPIO\n"); - goto err; - } - } + + ret = atmel_init_gpios(port, &pdev->dev); + if (ret < 0) + dev_err(&pdev->dev, "%s", + "Failed to initialize GPIOs. The serial port may not work as expected"); ret = atmel_init_port(port, pdev); if (ret) diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index e26b0c14edea..cea9f70133c5 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -84,7 +84,6 @@ struct atmel_uart_data { short use_dma_rx; /* use receive DMA? */ void __iomem *regs; /* virt. base address, if any */ struct serial_rs485 rs485; /* rs485 settings */ - int rts_gpio; /* optional RTS GPIO */ }; /* Touchscreen Controller */ -- cgit v1.2.3 From e4bdab70dd07d8648a1ec3e029239aa86eb836b6 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 13 May 2014 12:09:28 +0200 Subject: console: Use explicit pointer type for vc_uni_pagedir* fields The vc_data.vc_uni_pagedir filed is currently long int, supposedly to be served generically. This, however, leads to lots of cast to pointer, and rather it worsens the readability significantly. Actually, we have now only a single uni_pagedir map implementation, and this won't change likely. So, it'd be much more simple and error-prone to just use the exact pointer for struct uni_pagedir instead of long. 
Ditto for vc_uni_pagedir_loc. It's a pointer to the uni_pagedir, thus it can be changed similarly to the exact type. Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/consolemap.c | 38 +++++++++++++++++++------------------- drivers/tty/vt/vt.c | 2 +- drivers/video/console/vgacon.c | 4 ++-- include/linux/console_struct.h | 5 +++-- 4 files changed, 25 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index 2978ca596a7f..3fdc786b6b2f 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -262,7 +262,7 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode) int m; if (glyph < 0 || glyph >= MAX_GLYPH) return 0; - else if (!(p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc)) + else if (!(p = *conp->vc_uni_pagedir_loc)) return glyph; else if (use_unicode) { if (!p->inverse_trans_unicode) @@ -287,7 +287,7 @@ static void update_user_maps(void) for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons_allocated(i)) continue; - p = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc; + p = *vc_cons[i].d->vc_uni_pagedir_loc; if (p && p != q) { set_inverse_transl(vc_cons[i].d, p, USER_MAP); set_inverse_trans_unicode(vc_cons[i].d, p); @@ -418,10 +418,10 @@ void con_free_unimap(struct vc_data *vc) { struct uni_pagedir *p; - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; if (!p) return; - *vc->vc_uni_pagedir_loc = 0; + *vc->vc_uni_pagedir_loc = NULL; if (--p->refcount) return; con_release_unimap(p); @@ -436,7 +436,7 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p) for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons_allocated(i)) continue; - q = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc; + q = *vc_cons[i].d->vc_uni_pagedir_loc; if (!q || q == p || q->sum != p->sum) continue; for (j = 0; j < 32; j++) { @@ -459,7 +459,7 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p) } if (j == 32) { q->refcount++; - *conp->vc_uni_pagedir_loc = (unsigned long)q; + *conp->vc_uni_pagedir_loc = q; con_release_unimap(p); kfree(p); return 1; @@ -500,7 +500,7 @@ static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui) { struct uni_pagedir *p, *q; - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; if (p && p->readonly) return -EIO; @@ -512,7 +512,7 @@ static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui) return -ENOMEM; } q->refcount=1; - *vc->vc_uni_pagedir_loc = (unsigned long)q; + *vc->vc_uni_pagedir_loc = q; } else { if (p == dflt) dflt = NULL; p->refcount++; @@ -539,7 +539,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) console_lock(); /* Save original vc_unipagdir_loc in case we allocate a new one */ - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; if (p->readonly) { console_unlock(); return -EIO; @@ -564,7 +564,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) * Since refcount was > 1, con_clear_unimap() allocated a * a new uni_pagedir for this vc. 
Re: p != q */ - q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + q = *vc->vc_uni_pagedir_loc; /* * uni_pgdir is a 32*32*64 table with rows allocated @@ -586,7 +586,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) err1 = con_insert_unipair(q, l, p2[k]); if (err1) { p->refcount++; - *vc->vc_uni_pagedir_loc = (unsigned long)p; + *vc->vc_uni_pagedir_loc = p; con_release_unimap(q); kfree(q); console_unlock(); @@ -655,12 +655,12 @@ int con_set_default_unimap(struct vc_data *vc) struct uni_pagedir *p; if (dflt) { - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; if (p == dflt) return 0; dflt->refcount++; - *vc->vc_uni_pagedir_loc = (unsigned long)dflt; + *vc->vc_uni_pagedir_loc = dflt; if (p && !--p->refcount) { con_release_unimap(p); kfree(p); @@ -674,7 +674,7 @@ int con_set_default_unimap(struct vc_data *vc) if (err) return err; - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; q = dfont_unitable; for (i = 0; i < 256; i++) @@ -685,7 +685,7 @@ int con_set_default_unimap(struct vc_data *vc) } if (con_unify_unimap(vc, p)) { - dflt = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + dflt = *vc->vc_uni_pagedir_loc; return err; } @@ -713,9 +713,9 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc) return 0; con_free_unimap(dst_vc); - q = (struct uni_pagedir *)*src_vc->vc_uni_pagedir_loc; + q = *src_vc->vc_uni_pagedir_loc; q->refcount++; - *dst_vc->vc_uni_pagedir_loc = (long)q; + *dst_vc->vc_uni_pagedir_loc = q; return 0; } EXPORT_SYMBOL(con_copy_unimap); @@ -737,7 +737,7 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni ect = 0; if (*vc->vc_uni_pagedir_loc) { - p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + p = *vc->vc_uni_pagedir_loc; for (i = 0; i < 32; i++) if ((p1 = p->uni_pgdir[i])) for (j = 0; j < 32; j++) @@ -810,7 +810,7 @@ conv_uni_to_pc(struct vc_data *conp, long ucs) if (!*conp->vc_uni_pagedir_loc) return -3; - p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc; + p = *conp->vc_uni_pagedir_loc; if ((p1 = p->uni_pgdir[ucs >> 11]) && (p2 = p1[(ucs >> 6) & 0x1f]) && (h = p2[ucs & 0x3f]) < MAX_GLYPH) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 5149a72a84ff..5e0f6ff2e2f5 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -735,7 +735,7 @@ static void visual_init(struct vc_data *vc, int num, int init) vc->vc_num = num; vc->vc_display_fg = &master_display_fg; vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; - vc->vc_uni_pagedir = 0; + vc->vc_uni_pagedir = NULL; vc->vc_hi_font_mask = 0; vc->vc_complement_mask = 0; vc->vc_can_do_color = 0; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 9e18770aaba6..f267284b423b 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -87,7 +87,7 @@ static void vgacon_save_screen(struct vc_data *c); static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, int lines); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); -static unsigned long vgacon_uni_pagedir; +static struct uni_pagedir *vgacon_uni_pagedir; static int vgacon_refcount; /* Description of the hardware situation */ @@ -554,7 +554,7 @@ static const char *vgacon_startup(void) static void vgacon_init(struct vc_data *c, int init) { - unsigned long p; + struct uni_pagedir *p; /* * We cannot be loaded as a module, therefore init is always 1, diff --git 
a/include/linux/console_struct.h b/include/linux/console_struct.h index 7f0c32908568..e859c98d1767 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -17,6 +17,7 @@ #include struct vt_struct; +struct uni_pagedir; #define NPAR 16 @@ -104,8 +105,8 @@ struct vc_data { unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ - unsigned long vc_uni_pagedir; - unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ + struct uni_pagedir *vc_uni_pagedir; + struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ /* additional information is in vt_kern.h */ }; -- cgit v1.2.3 From 782a985d7af26db39e86070d28f987cad21313c0 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 20 May 2014 08:53:21 -0600 Subject: PCI: Introduce new device binding path using pci_dev.driver_override The driver_override field allows us to specify the driver for a device rather than relying on the driver to provide a positive match of the device. This shortcuts the existing process of looking up the vendor and device ID, adding them to the driver new_id, binding the device, then removing the ID, but it also provides a couple advantages. First, the above existing process allows the driver to bind to any device matching the new_id for the window where it's enabled. This is often not desired, such as the case of trying to bind a single device to a meta driver like pci-stub or vfio-pci. Using driver_override we can do this deterministically using: echo pci-stub > /sys/bus/pci/devices/0000:03:00.0/driver_override echo 0000:03:00.0 > /sys/bus/pci/devices/0000:03:00.0/driver/unbind echo 0000:03:00.0 > /sys/bus/pci/drivers_probe Previously we could not invoke drivers_probe after adding a device to new_id for a driver as we get non-deterministic behavior whether the driver we intend or the standard driver will claim the device. Now it becomes a deterministic process, only the driver matching driver_override will probe the device. To return the device to the standard driver, we simply clear the driver_override and reprobe the device: echo > /sys/bus/pci/devices/0000:03:00.0/driver_override echo 0000:03:00.0 > /sys/bus/pci/devices/0000:03:00.0/driver/unbind echo 0000:03:00.0 > /sys/bus/pci/drivers_probe Another advantage to this approach is that we can specify a driver override to force a specific binding or prevent any binding. For instance when an IOMMU group is exposed to userspace through VFIO we require that all devices within that group are owned by VFIO. However, devices can be hot-added into an IOMMU group, in which case we want to prevent the device from binding to any driver (override driver = "none") or perhaps have it automatically bind to vfio-pci. With driver_override it's a simple matter for this field to be set internally when the device is first discovered to prevent driver matches. 
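Restated compactly (a sketch, not the patch itself — the real change is the pci_match_device() hunk further down), the binding rule becomes: an override pins the device to exactly one driver name, otherwise the usual ID tables decide. The example_* helpers are hypothetical:

#include <linux/pci.h>
#include <linux/string.h>

/* Stand-in for the dynamic + static ID table lookup done by the PCI core */
static bool example_id_tables_match(struct pci_driver *drv, struct pci_dev *dev)
{
	return pci_match_id(drv->id_table, dev) != NULL;
}

static bool example_driver_matches(struct pci_driver *drv, struct pci_dev *dev)
{
	/* An override pins the device to exactly one driver name... */
	if (dev->driver_override)
		return !strcmp(dev->driver_override, drv->name);

	/* ...otherwise ID matching decides, as before. */
	return example_id_tables_match(drv, dev);
}

With an override in place the driver is offered the device even when none of its IDs match; that is what the pci_device_id_any dummy entry in the hunk below provides.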
Signed-off-by: Alex Williamson Signed-off-by: Bjorn Helgaas Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Alexander Graf Acked-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-bus-pci | 21 +++++++++++++++++ drivers/pci/pci-driver.c | 25 ++++++++++++++++++--- drivers/pci/pci-sysfs.c | 40 +++++++++++++++++++++++++++++++++ drivers/pci/probe.c | 1 + include/linux/pci.h | 1 + 5 files changed, 85 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index a3c5a6685036..898ddc4440e6 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -250,3 +250,24 @@ Description: valid. For example, writing a 2 to this file when sriov_numvfs is not 0 and not 2 already will return an error. Writing a 10 when the value of sriov_totalvfs is 8 will return an error. + +What: /sys/bus/pci/devices/.../driver_override +Date: April 2014 +Contact: Alex Williamson +Description: + This file allows the driver for a device to be specified which + will override standard static and dynamic ID matching. When + specified, only a driver with a name matching the value written + to driver_override will have an opportunity to bind to the + device. The override is specified by writing a string to the + driver_override file (echo pci-stub > driver_override) and + may be cleared with an empty string (echo > driver_override). + This returns the device to standard matching rules binding. + Writing to driver_override does not automatically unbind the + device from its current driver or make any attempt to + automatically load the specified driver. If no driver with a + matching name is currently loaded in the kernel, the device + will not bind to any driver. This also allows devices to + opt-out of driver binding using a driver_override name such as + "none". Only a single driver may be specified in the override, + there is no support for parsing delimiters. 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d911e0c1f359..4393c12e9135 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -216,6 +216,13 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, return NULL; } +static const struct pci_device_id pci_device_id_any = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, +}; + /** * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure * @drv: the PCI driver to match against @@ -229,18 +236,30 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev) { struct pci_dynid *dynid; + const struct pci_device_id *found_id = NULL; + + /* When driver_override is set, only bind to the matching driver */ + if (dev->driver_override && strcmp(dev->driver_override, drv->name)) + return NULL; /* Look at the dynamic ids first, before the static ones */ spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (pci_match_one_device(&dynid->id, dev)) { - spin_unlock(&drv->dynids.lock); - return &dynid->id; + found_id = &dynid->id; + break; } } spin_unlock(&drv->dynids.lock); - return pci_match_id(drv->id_table, dev); + if (!found_id) + found_id = pci_match_id(drv->id_table, dev); + + /* driver_override will always match, send a dummy id */ + if (!found_id && dev->driver_override) + found_id = &pci_device_id_any; + + return found_id; } struct drv_dev_and_id { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 4e0acefb7565..faa4ab554d68 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -499,6 +499,45 @@ static struct device_attribute sriov_numvfs_attr = sriov_numvfs_show, sriov_numvfs_store); #endif /* CONFIG_PCI_IOV */ +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + char *driver_override, *old = pdev->driver_override, *cp; + + if (count > PATH_MAX) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%s\n", pdev->driver_override); +} +static DEVICE_ATTR_RW(driver_override); + static struct attribute *pci_dev_attrs[] = { &dev_attr_resource.attr, &dev_attr_vendor.attr, @@ -521,6 +560,7 @@ static struct attribute *pci_dev_attrs[] = { #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) &dev_attr_d3cold_allowed.attr, #endif + &dev_attr_driver_override.attr, NULL, }; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ef09f5f2fe6c..54268de45f59 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1215,6 +1215,7 @@ static void pci_release_dev(struct device *dev) pci_release_of_node(pci_dev); pcibios_release_device(pci_dev); pci_bus_put(pci_dev->bus); + kfree(pci_dev->driver_override); kfree(pci_dev); } diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..b72af276f591 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -365,6 +365,7 @@ struct pci_dev { #endif 
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ size_t romlen; /* Length of ROM if it's not from the BAR */ + char *driver_override; /* Driver name to force a match */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) -- cgit v1.2.3 From 4d92a9beb39d80a7d8ff7c04ae12a10290105ae5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 May 2014 08:09:00 -0600 Subject: block: remove 'magic' from struct blk_plug I don't think we've ever caught any bugs with this, and there's the list poisoning for the plug lists to catch uninitialized cases. So remove the magic member and save 8 bytes in the struct. Signed-off-by: Jens Axboe --- block/blk-core.c | 5 ----- include/linux/blkdev.h | 1 - 2 files changed, 6 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index d87be5b4e554..40d654861c33 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2957,8 +2957,6 @@ int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, } EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); -#define PLUG_MAGIC 0x91827364 - /** * blk_start_plug - initialize blk_plug and track it inside the task_struct * @plug: The &struct blk_plug that needs to be initialized @@ -2977,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug) { struct task_struct *tsk = current; - plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); INIT_LIST_HEAD(&plug->mq_list); INIT_LIST_HEAD(&plug->cb_list); @@ -3074,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) LIST_HEAD(list); unsigned int depth; - BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug, from_schedule); if (!list_empty(&plug->mq_list)) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 913f1c2d3be0..098304576d51 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1060,7 +1060,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} * schedule() where blk_schedule_flush_plug() is called. */ struct blk_plug { - unsigned long magic; /* detect uninitialized use-cases */ struct list_head list; /* requests */ struct list_head mq_list; /* blk-mq requests */ struct list_head cb_list; /* md requires an unplug callback */ -- cgit v1.2.3 From 05f1dd5315217398fc8d122bdee80f96a9f21274 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 May 2014 09:53:32 -0600 Subject: block: add queue flag for disabling SG merging If devices are not SG starved, we waste a lot of time potentially collapsing SG segments. Enough that 1.5% of the CPU time goes to this, at only 400K IOPS. Add a queue flag, QUEUE_FLAG_NO_SG_MERGE, which just returns the number of vectors in a bio instead of looping over all segments and checking for collapsible ones. Add a BLK_MQ_F_SG_MERGE flag so that drivers can opt-in on the sg merging, if they so desire. 
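As a sketch of how the two new flags are meant to be used (illustration only, not code from the patch; the example_* names are hypothetical): a blk-mq driver that benefits from collapsed scatterlists opts back in via its tag set, and queues without the opt-in carry QUEUE_FLAG_NO_SG_MERGE, which short-circuits segment recounting.

#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Opt in: keep merging adjacent segments for this driver's queues */
static void example_enable_sg_merge(struct blk_mq_tag_set *set)
{
	set->flags |= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
}

/* Without the opt-in, blk_recount_segments() just uses bio->bi_vcnt */
static bool example_queue_skips_sg_merge(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}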
Signed-off-by: Jens Axboe --- block/blk-merge.c | 28 +++++++++++++++++++++------- block/blk-mq.c | 3 +++ include/linux/blk-mq.h | 1 + include/linux/blkdev.h | 1 + 4 files changed, 26 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/block/blk-merge.c b/block/blk-merge.c index 6c583f9c5b65..b3bf0df0f4c2 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { struct bio_vec bv, bvprv = { NULL }; - int cluster, high, highprv = 1; + int cluster, high, highprv = 1, no_sg_merge; unsigned int seg_size, nr_phys_segs; struct bio *fbio, *bbio; struct bvec_iter iter; @@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, cluster = blk_queue_cluster(q); seg_size = 0; nr_phys_segs = 0; + no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + high = 0; for_each_bio(bio) { bio_for_each_segment(bv, bio, iter) { + /* + * If SG merging is disabled, each bio vector is + * a segment + */ + if (no_sg_merge) + goto new_segment; + /* * the trick here is making sure that a high page is - * never considered part of another segment, since that - * might change with the bounce page. + * never considered part of another segment, since + * that might change with the bounce page. */ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); if (!high && !highprv && cluster) { @@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq) void blk_recount_segments(struct request_queue *q, struct bio *bio) { - struct bio *nxt = bio->bi_next; + if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags)) + bio->bi_phys_segments = bio->bi_vcnt; + else { + struct bio *nxt = bio->bi_next; + + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); + bio->bi_next = nxt; + } - bio->bi_next = NULL; - bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); - bio->bi_next = nxt; bio->bi_flags |= (1 << BIO_SEG_VALID); } EXPORT_SYMBOL(blk_recount_segments); diff --git a/block/blk-mq.c b/block/blk-mq.c index f27fe44230c2..f98d977fd150 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1829,6 +1829,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + if (!(set->flags & BLK_MQ_F_SG_MERGE)) + q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; + q->sg_reserved_size = INT_MAX; INIT_WORK(&q->requeue_work, blk_mq_requeue_work); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 91dfb75ce39f..95de239444d2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -129,6 +129,7 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_TAG_SHARED = 1 << 2, + BLK_MQ_F_SG_MERGE = 1 << 3, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 098304576d51..695b9fd41efe 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -510,6 +510,7 @@ struct request_queue { #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ +#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ -- cgit v1.2.3 From 4055e5e54ecea4a41edec42f6bd4ee274892e872 Mon Sep 17 00:00:00 2001 From: David Mosberger-Tang Date: Thu, 29 May 2014 10:23:55 -0600 
Subject: usb: host: max3421-hcd: Allow platform-data to specify Vbus polarity Signed-off-by: Davidm Mosberger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/max3421-hcd.c | 6 ++++-- include/linux/platform_data/max3421-hcd.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index ccb1bc42b4d2..fd3ed994fa4d 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1717,7 +1717,8 @@ max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index, break; case USB_PORT_FEAT_POWER: dev_dbg(hcd->self.controller, "power-off\n"); - max3421_gpout_set_value(hcd, pdata->vbus_gpout, 0); + max3421_gpout_set_value(hcd, pdata->vbus_gpout, + !pdata->vbus_active_level); /* FALLS THROUGH */ default: max3421_hcd->port_status &= ~(1 << value); @@ -1766,7 +1767,8 @@ max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index, case USB_PORT_FEAT_POWER: dev_dbg(hcd->self.controller, "power-on\n"); max3421_hcd->port_status |= USB_PORT_STAT_POWER; - max3421_gpout_set_value(hcd, pdata->vbus_gpout, 1); + max3421_gpout_set_value(hcd, pdata->vbus_gpout, + pdata->vbus_active_level); break; case USB_PORT_FEAT_RESET: max3421_reset_port(hcd); diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h index 4ad459605d87..0303d1970084 100644 --- a/include/linux/platform_data/max3421-hcd.h +++ b/include/linux/platform_data/max3421-hcd.h @@ -18,6 +18,7 @@ */ struct max3421_hcd_platform_data { u8 vbus_gpout; /* pin controlling Vbus */ + u8 vbus_active_level; /* level that turns on power */ }; #endif /* MAX3421_HCD_PLAT_H_INCLUDED */ -- cgit v1.2.3 From 9113e260767b1cb44f8da0e5922e1a9a5417c4b8 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 28 May 2014 15:23:37 +0800 Subject: power_supply: allow power supply devices registered w/o wakeup source Currently, all the power supply devices are registered with wakeup source, this results in that every power_supply_changed() invocation brings the system out of suspend-to-freeze state. This is overkill as some device drivers, e.g. ACPI battery driver, have the ability to check the device status and wake up the system from sleeping only when necessary. Thus introduce a new API which allows device to be registered w/o wakeup source. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. 
Wysocki --- drivers/power/power_supply_core.c | 15 +++++++++++++-- include/linux/power_supply.h | 2 ++ 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 26606641fe44..5a5a24e7d43c 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -537,7 +537,7 @@ static void psy_unregister_cooler(struct power_supply *psy) } #endif -int power_supply_register(struct device *parent, struct power_supply *psy) +int __power_supply_register(struct device *parent, struct power_supply *psy, bool ws) { struct device *dev; int rc; @@ -568,7 +568,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy) } spin_lock_init(&psy->changed_lock); - rc = device_init_wakeup(dev, true); + rc = device_init_wakeup(dev, ws); if (rc) goto wakeup_init_failed; @@ -606,8 +606,19 @@ dev_set_name_failed: success: return rc; } + +int power_supply_register(struct device *parent, struct power_supply *psy) +{ + return __power_supply_register(parent, psy, true); +} EXPORT_SYMBOL_GPL(power_supply_register); +int power_supply_register_no_ws(struct device *parent, struct power_supply *psy) +{ + return __power_supply_register(parent, psy, false); +} +EXPORT_SYMBOL_GPL(power_supply_register_no_ws); + void power_supply_unregister(struct power_supply *psy) { cancel_work_sync(&psy->changed_work); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index c9dc4e09854c..f2b76aeaf4e4 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -264,6 +264,8 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } extern int power_supply_register(struct device *parent, struct power_supply *psy); +extern int power_supply_register_no_ws(struct device *parent, + struct power_supply *psy); extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); -- cgit v1.2.3 From eec15edbb0e14485998635ea7c62e30911b465f0 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 30 May 2014 04:23:01 +0200 Subject: ACPI / PNP: use device ID list for PNPACPI device enumeration ACPI can be used to enumerate PNP devices, but the code does not handle this in the right way currently. Namely, if an ACPI device object 1. Has a _CRS method, 2. Has an identification of "three capital characters followed by four hex digits", 3. Is not in the excluded IDs list, it will be enumerated to PNP bus (that is, a PNP device object will be create for it). This means that, actually, the PNP bus type is used as the default bus type for enumerating _HID devices in ACPI. However, more and more _HID devices need to be enumerated to the platform bus instead (that is, platform device objects need to be created for them). As a result, the device ID list in acpi_platform.c is used to enforce creating platform device objects rather than PNP device objects for matching devices. That list has been continuously growing recently, unfortunately, and it is pretty much guaranteed to grow even more in the future. To address that problem it is better to enumerate _HID devices as platform devices by default. To this end, change the way of enumerating PNP devices by adding a PNP ACPI scan handler that will use a device ID list to create PNP devices for the ACPI device objects whose device IDs are present in that list. 
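The resulting enumeration policy can be sketched as follows (illustration only, not code from the patch; example_id_is_in_pnp_table() is a hypothetical stand-in for the lookup that the acpi_pnp_match() handler added below performs over acpi_pnp_device_ids):

#include <linux/acpi.h>

static bool example_id_is_in_pnp_table(const char *id)
{
	return false;	/* stand-in: would consult acpi_pnp_device_ids */
}

/*
 * Device objects whose IDs are in the table become PNP devices; any other
 * _HID device object is left for default (platform bus) enumeration.
 */
static const char *example_target_bus(struct acpi_device *adev)
{
	struct acpi_hardware_id *hwid;

	list_for_each_entry(hwid, &adev->pnp.ids, list)
		if (example_id_is_in_pnp_table(hwid->id))
			return "pnp";

	return "platform";
}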
The initial device ID list in the PNP ACPI scan handler contains all of the pnp_device_id strings from all the existing PNP drivers, so this change should be transparent to the PNP core and all of the PNP drivers. Still, in the future it should be possible to reduce its size by converting PNP drivers that need not be PNP for any technical reasons into platform drivers. Signed-off-by: Zhang Rui [rjw: Rewrote the changelog, modified the PNP ACPI scan handler code] Signed-off-by: Rafael J. Wysocki Reviewed-by: Mika Westerberg --- drivers/acpi/Makefile | 1 + drivers/acpi/acpi_cmos_rtc.c | 2 +- drivers/acpi/acpi_pnp.c | 401 +++++++++++++++++++++++++++++++++++++++++++ drivers/acpi/internal.h | 1 + drivers/acpi/scan.c | 1 + drivers/pnp/pnpacpi/core.c | 28 +-- include/linux/acpi.h | 2 + 7 files changed, 411 insertions(+), 25 deletions(-) create mode 100644 drivers/acpi/acpi_pnp.c (limited to 'include/linux') diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index bce34afadcd0..144671a2030f 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -41,6 +41,7 @@ acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-y += pci_root.o pci_link.o pci_irq.o acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o acpi-y += acpi_platform.o +acpi-y += acpi_pnp.o acpi-y += power.o acpi-y += event.o acpi-y += sysfs.o diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c index 961b45d18a5d..2da8660262e5 100644 --- a/drivers/acpi/acpi_cmos_rtc.c +++ b/drivers/acpi/acpi_cmos_rtc.c @@ -68,7 +68,7 @@ static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev, return -ENODEV; } - return 0; + return 1; } static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c new file mode 100644 index 000000000000..567e7fc6330c --- /dev/null +++ b/drivers/acpi/acpi_pnp.c @@ -0,0 +1,401 @@ +/* + * ACPI support for PNP bus type + * + * Copyright (C) 2014, Intel Corporation + * Authors: Zhang Rui + * Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +static const struct acpi_device_id acpi_pnp_device_ids[] = { + /* pata_isapnp */ + {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ + /* floppy */ + {"PNP0700"}, + /* ipmi_si */ + {"IPI0001"}, + /* tpm_inf_pnp */ + {"IFX0101"}, /* Infineon TPMs */ + {"IFX0102"}, /* Infineon TPMs */ + /*tpm_tis */ + {"PNP0C31"}, /* TPM */ + {"ATM1200"}, /* Atmel */ + {"IFX0102"}, /* Infineon */ + {"BCM0101"}, /* Broadcom */ + {"BCM0102"}, /* Broadcom */ + {"NSC1200"}, /* National */ + {"ICO0102"}, /* Intel */ + /* ide */ + {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ + /* ns558 */ + {"@P@0001"}, /* ALS 100 */ + {"@P@0020"}, /* ALS 200 */ + {"@P@1001"}, /* ALS 100+ */ + {"@P@2001"}, /* ALS 120 */ + {"ASB16fd"}, /* AdLib NSC16 */ + {"AZT3001"}, /* AZT1008 */ + {"CDC0001"}, /* Opl3-SAx */ + {"CSC0001"}, /* CS4232 */ + {"CSC000f"}, /* CS4236 */ + {"CSC0101"}, /* CS4327 */ + {"CTL7001"}, /* SB16 */ + {"CTL7002"}, /* AWE64 */ + {"CTL7005"}, /* Vibra16 */ + {"ENS2020"}, /* SoundscapeVIVO */ + {"ESS0001"}, /* ES1869 */ + {"ESS0005"}, /* ES1878 */ + {"ESS6880"}, /* ES688 */ + {"IBM0012"}, /* CS4232 */ + {"OPT0001"}, /* OPTi Audio16 */ + {"YMH0006"}, /* Opl3-SA */ + {"YMH0022"}, /* Opl3-SAx */ + {"PNPb02f"}, /* Generic */ + /* i8042 kbd */ + {"PNP0300"}, + {"PNP0301"}, + {"PNP0302"}, + {"PNP0303"}, + {"PNP0304"}, + {"PNP0305"}, + {"PNP0306"}, + {"PNP0309"}, + {"PNP030a"}, + {"PNP030b"}, + {"PNP0320"}, + {"PNP0343"}, + {"PNP0344"}, + {"PNP0345"}, + {"CPQA0D7"}, + /* i8042 aux */ + {"AUI0200"}, + {"FJC6000"}, + {"FJC6001"}, + {"PNP0f03"}, + {"PNP0f0b"}, + {"PNP0f0e"}, + {"PNP0f12"}, + {"PNP0f13"}, + {"PNP0f19"}, + {"PNP0f1c"}, + {"SYN0801"}, + /* fcpnp */ + {"AVM0900"}, + /* radio-cadet */ + {"MSM0c24"}, /* ADS Cadet AM/FM Radio Card */ + /* radio-gemtek */ + {"ADS7183"}, /* AOpen FX-3D/Pro Radio */ + /* radio-sf16fmr2 */ + {"MFRad13"}, /* tuner subdevice of SF16-FMD2 */ + /* ene_ir */ + {"ENE0100"}, + {"ENE0200"}, + {"ENE0201"}, + {"ENE0202"}, + /* fintek-cir */ + {"FIT0002"}, /* CIR */ + /* ite-cir */ + {"ITE8704"}, /* Default model */ + {"ITE8713"}, /* CIR found in EEEBox 1501U */ + {"ITE8708"}, /* Bridged IT8512 */ + {"ITE8709"}, /* SRAM-Bridged IT8512 */ + /* nuvoton-cir */ + {"WEC0530"}, /* CIR */ + {"NTN0530"}, /* CIR for new chip's pnp id */ + /* Winbond CIR */ + {"WEC1022"}, + /* wbsd */ + {"WEC0517"}, + {"WEC0518"}, + /* Winbond CIR */ + {"TCM5090"}, /* 3Com Etherlink III (TP) */ + {"TCM5091"}, /* 3Com Etherlink III */ + {"TCM5094"}, /* 3Com Etherlink III (combo) */ + {"TCM5095"}, /* 3Com Etherlink III (TPO) */ + {"TCM5098"}, /* 3Com Etherlink III (TPC) */ + {"PNP80f7"}, /* 3Com Etherlink III compatible */ + {"PNP80f8"}, /* 3Com Etherlink III compatible */ + /* nsc-ircc */ + {"NSC6001"}, + {"HWPC224"}, + {"IBM0071"}, + /* smsc-ircc2 */ + {"SMCf010"}, + /* sb1000 */ + {"GIC1000"}, + /* parport_pc */ + {"PNP0400"}, /* Standard LPT Printer Port */ + {"PNP0401"}, /* ECP Printer Port */ + /* apple-gmux */ + {"APP000B"}, + /* fujitsu-laptop.c */ + {"FUJ02bf"}, + {"FUJ02B1"}, + {"FUJ02E3"}, + /* system */ + {"PNP0c02"}, /* General ID for reserving resources */ + {"PNP0c01"}, /* memory controller */ + /* rtc_cmos */ + {"PNP0b00"}, + {"PNP0b01"}, + {"PNP0b02"}, + /* c6xdigio */ + {"PNP0400"}, /* Standard LPT Printer Port */ + {"PNP0401"}, /* ECP Printer Port */ + /* ni_atmio.c */ + {"NIC1900"}, + {"NIC2400"}, + {"NIC2500"}, + {"NIC2600"}, + {"NIC2700"}, + /* serial */ + {"AAC000F"}, /* Archtek America Corp. 
Archtek SmartLink Modem 3334BT Plug & Play */ + {"ADC0001"}, /* Anchor Datacomm BV. SXPro 144 External Data Fax Modem Plug & Play */ + {"ADC0002"}, /* SXPro 288 External Data Fax Modem Plug & Play */ + {"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */ + {"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */ + {"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */ + {"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */ + {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ + {"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */ + {"BRI1400"}, /* Boca Research 33,600 ACF Modem */ + {"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */ + {"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */ + {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ + {"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */ + {"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */ + {"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ + {"DAV0336"}, /* Davicom ISA 33.6K Modem */ + {"DMB1032"}, /* Creative Modem Blaster Flash56 DI5601-1 */ + {"DMB2001"}, /* Creative Modem Blaster V.90 DI5660 */ + {"ETT0002"}, /* E-Tech CyberBULLET PC56RVP */ + {"FUJ0202"}, /* Fujitsu 33600 PnP-I2 R Plug & Play */ + {"FUJ0205"}, /* Fujitsu FMV-FX431 Plug & Play */ + {"FUJ0206"}, /* Fujitsu 33600 PnP-I4 R Plug & Play */ + {"FUJ0209"}, /* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */ + {"GVC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */ + {"GVC0303"}, /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ + {"HAY0001"}, /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ + {"HAY000C"}, /* Hayes Optima 336 V.34 + FAX + Voice PnP */ + {"HAY000D"}, /* Hayes Optima 336B V.34 + FAX + Voice PnP */ + {"HAY5670"}, /* Hayes Accura 56K Ext Fax Modem PnP */ + {"HAY5674"}, /* Hayes Accura 56K Ext Fax Modem PnP */ + {"HAY5675"}, /* Hayes Accura 56K Fax Modem PnP */ + {"HAYF000"}, /* Hayes 288, V.34 + FAX */ + {"HAYF001"}, /* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */ + {"IBM0033"}, /* IBM Thinkpad 701 Internal Modem Voice */ + {"PNP4972"}, /* Intermec CV60 touchscreen port */ + {"IXDC801"}, /* Intertex 28k8 33k6 Voice EXT PnP */ + {"IXDC901"}, /* Intertex 33k6 56k Voice EXT PnP */ + {"IXDD801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */ + {"IXDD901"}, /* Intertex 33k6 56k Voice SP EXT PnP */ + {"IXDF401"}, /* Intertex 28k8 33k6 Voice SP INT PnP */ + {"IXDF801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */ + {"IXDF901"}, /* Intertex 33k6 56k Voice SP EXT PnP */ + {"KOR4522"}, /* KORTEX 28800 Externe PnP */ + {"KORF661"}, /* KXPro 33.6 Vocal ASVD PnP */ + {"LAS4040"}, /* LASAT Internet 33600 PnP */ + {"LAS4540"}, /* Lasat Safire 560 PnP */ + {"LAS5440"}, /* Lasat Safire 336 PnP */ + {"MNP0281"}, /* Microcom TravelPorte FAST V.34 Plug & Play */ + {"MNP0336"}, /* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */ + {"MNP0339"}, /* Microcom DeskPorte FAST EP 28.8 Plug & Play */ + {"MNP0342"}, /* Microcom DeskPorte 28.8P Plug & Play */ + {"MNP0500"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ + {"MNP0501"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ + {"MNP0502"}, /* Microcom DeskPorte 28.8S Internal Plug & Play */ + {"MOT1105"}, /* Motorola BitSURFR Plug & Play */ + {"MOT1111"}, /* Motorola TA210 Plug & Play */ + {"MOT1114"}, /* Motorola HMTA 200 (ISDN) Plug & Play */ + {"MOT1115"}, /* Motorola BitSURFR Plug & Play */ + {"MOT1190"}, /* Motorola Lifestyle 28.8 Internal */ + {"MOT1501"}, /* Motorola V.3400 Plug & Play */ + 
{"MOT1502"}, /* Motorola Lifestyle 28.8 V.34 Plug & Play */ + {"MOT1505"}, /* Motorola Power 28.8 V.34 Plug & Play */ + {"MOT1509"}, /* Motorola ModemSURFR External 28.8 Plug & Play */ + {"MOT150A"}, /* Motorola Premier 33.6 Desktop Plug & Play */ + {"MOT150F"}, /* Motorola VoiceSURFR 56K External PnP */ + {"MOT1510"}, /* Motorola ModemSURFR 56K External PnP */ + {"MOT1550"}, /* Motorola ModemSURFR 56K Internal PnP */ + {"MOT1560"}, /* Motorola ModemSURFR Internal 28.8 Plug & Play */ + {"MOT1580"}, /* Motorola Premier 33.6 Internal Plug & Play */ + {"MOT15B0"}, /* Motorola OnlineSURFR 28.8 Internal Plug & Play */ + {"MOT15F0"}, /* Motorola VoiceSURFR 56K Internal PnP */ + {"MVX00A1"}, /* Deskline K56 Phone System PnP */ + {"MVX00F2"}, /* PC Rider K56 Phone System PnP */ + {"nEC8241"}, /* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */ + {"PMC2430"}, /* Pace 56 Voice Internal Plug & Play Modem */ + {"PNP0500"}, /* Generic standard PC COM port */ + {"PNP0501"}, /* Generic 16550A-compatible COM port */ + {"PNPC000"}, /* Compaq 14400 Modem */ + {"PNPC001"}, /* Compaq 2400/9600 Modem */ + {"PNPC031"}, /* Dial-Up Networking Serial Cable between 2 PCs */ + {"PNPC032"}, /* Dial-Up Networking Parallel Cable between 2 PCs */ + {"PNPC100"}, /* Standard 9600 bps Modem */ + {"PNPC101"}, /* Standard 14400 bps Modem */ + {"PNPC102"}, /* Standard 28800 bps Modem */ + {"PNPC103"}, /* Standard Modem */ + {"PNPC104"}, /* Standard 9600 bps Modem */ + {"PNPC105"}, /* Standard 14400 bps Modem */ + {"PNPC106"}, /* Standard 28800 bps Modem */ + {"PNPC107"}, /* Standard Modem */ + {"PNPC108"}, /* Standard 9600 bps Modem */ + {"PNPC109"}, /* Standard 14400 bps Modem */ + {"PNPC10A"}, /* Standard 28800 bps Modem */ + {"PNPC10B"}, /* Standard Modem */ + {"PNPC10C"}, /* Standard 9600 bps Modem */ + {"PNPC10D"}, /* Standard 14400 bps Modem */ + {"PNPC10E"}, /* Standard 28800 bps Modem */ + {"PNPC10F"}, /* Standard Modem */ + {"PNP2000"}, /* Standard PCMCIA Card Modem */ + {"ROK0030"}, /* Rockwell 33.6 DPF Internal PnP, Modular Technology 33.6 Internal PnP */ + {"ROK0100"}, /* KORTEX 14400 Externe PnP */ + {"ROK4120"}, /* Rockwell 28.8 */ + {"ROK4920"}, /* Viking 28.8 INTERNAL Fax+Data+Voice PnP */ + {"RSS00A0"}, /* Rockwell 33.6 DPF External PnP, BT Prologue 33.6 External PnP, Modular Technology 33.6 External PnP */ + {"RSS0262"}, /* Viking 56K FAX INT */ + {"RSS0250"}, /* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */ + {"SUP1310"}, /* SupraExpress 28.8 Data/Fax PnP modem */ + {"SUP1381"}, /* SupraExpress 336i PnP Voice Modem */ + {"SUP1421"}, /* SupraExpress 33.6 Data/Fax PnP modem */ + {"SUP1590"}, /* SupraExpress 33.6 Data/Fax PnP modem */ + {"SUP1620"}, /* SupraExpress 336i Sp ASVD */ + {"SUP1760"}, /* SupraExpress 33.6 Data/Fax PnP modem */ + {"SUP2171"}, /* SupraExpress 56i Sp Intl */ + {"TEX0011"}, /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */ + {"UAC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */ + {"USR0000"}, /* 3Com Corp. Gateway Telepath IIvi 33.6 */ + {"USR0002"}, /* U.S. Robotics Sporster 33.6K Fax INT PnP */ + {"USR0004"}, /* Sportster Vi 14.4 PnP FAX Voicemail */ + {"USR0006"}, /* U.S. Robotics 33.6K Voice INT PnP */ + {"USR0007"}, /* U.S. Robotics 33.6K Voice EXT PnP */ + {"USR0009"}, /* U.S. Robotics Courier V.Everything INT PnP */ + {"USR2002"}, /* U.S. Robotics 33.6K Voice INT PnP */ + {"USR2070"}, /* U.S. Robotics 56K Voice INT PnP */ + {"USR2080"}, /* U.S. Robotics 56K Voice EXT PnP */ + {"USR3031"}, /* U.S. Robotics 56K FAX INT */ + {"USR3050"}, /* U.S. 
Robotics 56K FAX INT */ + {"USR3070"}, /* U.S. Robotics 56K Voice INT PnP */ + {"USR3080"}, /* U.S. Robotics 56K Voice EXT PnP */ + {"USR3090"}, /* U.S. Robotics 56K Voice INT PnP */ + {"USR9100"}, /* U.S. Robotics 56K Message */ + {"USR9160"}, /* U.S. Robotics 56K FAX EXT PnP */ + {"USR9170"}, /* U.S. Robotics 56K FAX INT PnP */ + {"USR9180"}, /* U.S. Robotics 56K Voice EXT PnP */ + {"USR9190"}, /* U.S. Robotics 56K Voice INT PnP */ + {"WACFXXX"}, /* Wacom tablets */ + {"FPI2002"}, /* Compaq touchscreen */ + {"FUJ02B2"}, /* Fujitsu Stylistic touchscreens */ + {"FUJ02B3"}, + {"FUJ02B4"}, /* Fujitsu Stylistic LT touchscreens */ + {"FUJ02B6"}, /* Passive Fujitsu Stylistic touchscreens */ + {"FUJ02B7"}, + {"FUJ02B8"}, + {"FUJ02B9"}, + {"FUJ02BC"}, + {"FUJ02E5"}, /* Fujitsu Wacom Tablet PC device */ + {"FUJ02E6"}, /* Fujitsu P-series tablet PC device */ + {"FUJ02E7"}, /* Fujitsu Wacom 2FGT Tablet PC device */ + {"FUJ02E9"}, /* Fujitsu Wacom 1FGT Tablet PC device */ + {"LTS0001"}, /* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in disguise) */ + {"WCI0003"}, /* Rockwell's (PORALiNK) 33600 INT PNP */ + {"WEC1022"}, /* Winbond CIR port, should not be probed. We should keep track of it to prevent the legacy serial driver from probing it */ + {"PNPCXXX"}, /* Unknown PnP modems */ + {"PNPDXXX"}, /* More unknown PnP modems */ + /* scl200wdt */ + {"NSC0800"}, /* National Semiconductor PC87307/PC97307 watchdog component */ + /* mpu401 */ + {"PNPb006"}, + /* cs423x-pnpbios */ + {"CSC0100"}, + {"CSC0000"}, + {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ + /* es18xx-pnpbios */ + {"ESS1869"}, + {"ESS1879"}, + /* snd-opl3sa2-pnpbios */ + {"YMH0021"}, + {"NMX2210"}, /* Gateway Solo 2500 */ + {""}, +}; + +static bool is_hex_digit(char c) +{ + return (c >= 0 && c <= '9') || (c >= 'A' && c <= 'F'); +} + +static bool matching_id(char *idstr, char *list_id) +{ + int i; + + if (memcmp(idstr, list_id, 3)) + return false; + + for (i = 3; i < 7; i++) { + char c = toupper(idstr[i]); + + if (!is_hex_digit(c) + || (list_id[i] != 'X' && c != toupper(list_id[i]))) + return false; + } + return true; +} + +static bool acpi_pnp_match(char *idstr, const struct acpi_device_id **matchid) +{ + const struct acpi_device_id *devid; + + for (devid = acpi_pnp_device_ids; devid->id[0]; devid++) + if (matching_id(idstr, (char *)devid->id)) { + if (matchid) + *matchid = devid; + + return true; + } + + return false; +} + +static int acpi_pnp_attach(struct acpi_device *adev, + const struct acpi_device_id *id) +{ + return 1; +} + +static struct acpi_scan_handler acpi_pnp_handler = { + .ids = acpi_pnp_device_ids, + .match = acpi_pnp_match, + .attach = acpi_pnp_attach, +}; + +/* + * For CMOS RTC devices, the PNP ACPI scan handler does not work, because + * there is a CMOS RTC ACPI scan handler installed already, so we need to + * check those devices and enumerate them to the PNP bus directly. 
+ */ +static int is_cmos_rtc_device(struct acpi_device *adev) +{ + struct acpi_device_id ids[] = { + { "PNP0B00" }, + { "PNP0B01" }, + { "PNP0B02" }, + {""}, + }; + return !acpi_match_device_ids(adev, ids); +} + +bool acpi_is_pnp_device(struct acpi_device *adev) +{ + return adev->handler == &acpi_pnp_handler || is_cmos_rtc_device(adev); +} +EXPORT_SYMBOL_GPL(acpi_is_pnp_device); + +void __init acpi_pnp_init(void) +{ + acpi_scan_add_handler(&acpi_pnp_handler); +} diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index bb7de413d06d..5c16cb6bc76d 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -30,6 +30,7 @@ void acpi_pci_root_init(void); void acpi_pci_link_init(void); void acpi_processor_init(void); void acpi_platform_init(void); +void acpi_pnp_init(void); int acpi_sysfs_init(void); #ifdef CONFIG_ACPI_CONTAINER void acpi_container_init(void); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e44438f7917b..19d524c5c0c8 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2253,6 +2253,7 @@ int __init acpi_scan_init(void) acpi_cmos_rtc_init(); acpi_container_init(); acpi_memory_hotplug_init(); + acpi_pnp_init(); mutex_lock(&acpi_scan_lock); /* diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index c31aa07b3ba5..b81448b2c75d 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -30,26 +30,6 @@ static int num; -/* We need only to blacklist devices that have already an acpi driver that - * can't use pnp layer. We don't need to blacklist device that are directly - * used by the kernel (PCI root, ...), as it is harmless and there were - * already present in pnpbios. But there is an exception for devices that - * have irqs (PIC, Timer) because we call acpi_register_gsi. - * Finally, only devices that have a CRS method need to be in this list. 
- */ -static struct acpi_device_id excluded_id_list[] __initdata = { - {"PNP0C09", 0}, /* EC */ - {"PNP0C0F", 0}, /* Link device */ - {"PNP0000", 0}, /* PIC */ - {"PNP0100", 0}, /* Timer */ - {"", 0}, -}; - -static inline int __init is_exclusive_device(struct acpi_device *dev) -{ - return (!acpi_match_device_ids(dev, excluded_id_list)); -} - /* * Compatible Device IDs */ @@ -266,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device) if (!pnpid) return 0; - if (is_exclusive_device(device) || !device->status.present) + if (!device->status.present) return 0; dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); @@ -326,10 +306,10 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, { struct acpi_device *device; - if (!acpi_bus_get_device(handle, &device)) - pnpacpi_add_device(device); - else + if (acpi_bus_get_device(handle, &device)) return AE_CTRL_DEPTH; + if (acpi_is_pnp_device(device)) + pnpacpi_add_device(device); return AE_OK; } diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4c007262e891..0b9927f4edd2 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -184,6 +184,8 @@ extern int ec_transaction(u8 command, u8 *rdata, unsigned rdata_len); extern acpi_handle ec_get_handle(void); +extern bool acpi_is_pnp_device(struct acpi_device *); + #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) typedef void (*wmi_notify_handler) (u32 value, void *context); -- cgit v1.2.3 From 2230237500821aedfcf2bba2a79d9cbca389233c Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 30 May 2014 08:06:42 -0600 Subject: blk-mq: blk_mq_tag_to_rq should handle flush request flush request is special, which borrows the tag from the parent request. Hence blk_mq_tag_to_rq needs special handling to return the flush request from the tag. 
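The lookup order matters here: the flush request's tag is not recorded in the hardware context's tag map, so it has to be checked first. A minimal sketch of that ordering, using the same field names the patch below introduces (illustrative only, not a substitute for the actual change):

    /* Sketch: resolve a tag to a request, giving the in-flight flush request priority. */
    struct request *tag_to_rq_sketch(struct blk_mq_hw_ctx *hctx, unsigned int tag)
    {
            struct request_queue *q = hctx->queue;

            /* The flush request borrowed its tag from the request that spawned it,
             * so it never appears in hctx->tags->rqs[] under that tag. */
            if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && q->flush_rq->tag == tag)
                    return q->flush_rq;

            return hctx->tags->rqs[tag];    /* ordinary requests: direct tag lookup */
    }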
Signed-off-by: Shaohua Li Signed-off-by: Jens Axboe --- block/blk-flush.c | 4 +++- block/blk-mq.c | 12 +++++++++--- include/linux/blk-mq.h | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/blk-flush.c b/block/blk-flush.c index ef608b35d9be..ff87c664b7df 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error) struct request *rq, *n; unsigned long flags = 0; - if (q->mq_ops) + if (q->mq_ops) { spin_lock_irqsave(&q->mq_flush_lock, flags); + q->flush_rq->cmd_flags = 0; + } running = &q->flush_queue[q->flush_running_idx]; BUG_ON(q->flush_pending_idx == q->flush_running_idx); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6160128085fc..21f952ab3581 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -541,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q) } EXPORT_SYMBOL(blk_mq_kick_requeue_list); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag) { - return tags->rqs[tag]; + struct request_queue *q = hctx->queue; + + if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && + q->flush_rq->tag == tag) + return q->flush_rq; + + return hctx->tags->rqs[tag]; } EXPORT_SYMBOL(blk_mq_tag_to_rq); @@ -572,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) if (tag >= hctx->tags->nr_tags) break; - rq = blk_mq_tag_to_rq(hctx->tags, tag++); + rq = blk_mq_tag_to_rq(hctx, tag++); if (rq->q != hctx->queue) continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95de239444d2..ad3adb73cc70 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -154,7 +154,7 @@ void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -- cgit v1.2.3 From 67aec14ce87fe25bdfff7dbf468556333df11c4e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 May 2014 08:25:36 -0600 Subject: blk-mq: make the sysfs mq/ layout reflect current mappings Currently blk-mq registers all the hardware queues in sysfs, regardless of whether it uses them (e.g. they have CPU mappings) or not. The unused hardware queues lack the cpux/ directories, and the other sysfs entries (like active, pending, etc) are all zeroes. Change this so that sysfs correctly reflects the current mappings of the hardware queues. 
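With the change below, only hardware queues that actually have software contexts mapped to them are registered under the block device's mq/ directory. On a hypothetical device with two usable hardware queues and four CPUs the layout would look roughly like this (paths are illustrative, following the "%u" and "cpu%u" naming used in the patch):

    /sys/block/<disk>/mq/0/cpu0/
    /sys/block/<disk>/mq/0/cpu1/
    /sys/block/<disk>/mq/1/cpu2/
    /sys/block/<disk>/mq/1/cpu3/

Hardware queues with no CPU mapping are simply not registered, instead of showing up as empty directories full of zeroed counters.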
Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 102 ++++++++++++++++++++++++++++++++++++++++--------- block/blk-mq.c | 4 ++ block/blk-mq.h | 6 +++ include/linux/blk-mq.h | 1 + 4 files changed, 94 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 99a60a829e69..e5f575ff0bf9 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -327,6 +327,42 @@ static struct kobj_type blk_mq_hw_ktype = { .release = blk_mq_sysfs_release, }; +void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_ctx *ctx; + int i; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return; + + hctx_for_each_ctx(hctx, ctx, i) + kobject_del(&ctx->kobj); + + kobject_del(&hctx->kobj); +} + +int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + int i, ret; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return 0; + + ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); + if (ret) + return ret; + + hctx_for_each_ctx(hctx, ctx, i) { + ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); + if (ret) + break; + } + + return ret; +} + void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; @@ -335,11 +371,11 @@ void blk_mq_unregister_disk(struct gendisk *disk) int i, j; queue_for_each_hw_ctx(q, hctx, i) { - hctx_for_each_ctx(hctx, ctx, j) { - kobject_del(&ctx->kobj); + blk_mq_unregister_hctx(hctx); + + hctx_for_each_ctx(hctx, ctx, j) kobject_put(&ctx->kobj); - } - kobject_del(&hctx->kobj); + kobject_put(&hctx->kobj); } @@ -350,15 +386,30 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&disk_to_dev(disk)->kobj); } +static void blk_mq_sysfs_init(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + kobject_init(&q->mq_kobj, &blk_mq_ktype); + + queue_for_each_hw_ctx(q, hctx, i) { + kobject_init(&hctx->kobj, &blk_mq_hw_ktype); + + hctx_for_each_ctx(hctx, ctx, j) + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); + } +} + int blk_mq_register_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; - int ret, i, j; + int ret, i; - kobject_init(&q->mq_kobj, &blk_mq_ktype); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) @@ -367,20 +418,10 @@ int blk_mq_register_disk(struct gendisk *disk) kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - kobject_init(&hctx->kobj, &blk_mq_hw_ktype); - ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i); + hctx->flags |= BLK_MQ_F_SYSFS_UP; + ret = blk_mq_register_hctx(hctx); if (ret) break; - - if (!hctx->nr_ctx) - continue; - - hctx_for_each_ctx(hctx, ctx, j) { - kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); - ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); - if (ret) - break; - } } if (ret) { @@ -390,3 +431,26 @@ int blk_mq_register_disk(struct gendisk *disk) return 0; } + +void blk_mq_sysfs_unregister(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_unregister_hctx(hctx); +} + +int blk_mq_sysfs_register(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i, ret = 0; + + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_register_hctx(hctx); + if (ret) + break; + } + + return ret; +} diff --git a/block/blk-mq.c 
b/block/blk-mq.c index 21f952ab3581..71f564e8812e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1924,6 +1924,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) { blk_mq_freeze_queue(q); + blk_mq_sysfs_unregister(q); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); /* @@ -1934,6 +1936,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) blk_mq_map_swqueue(q); + blk_mq_sysfs_register(q); + blk_mq_unfreeze_queue(q); } diff --git a/block/blk-mq.h b/block/blk-mq.h index ff5e6bf0f691..de7b3bbd5bd6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -54,6 +54,12 @@ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); +/* + * sysfs helpers + */ +extern int blk_mq_sysfs_register(struct request_queue *q); +extern void blk_mq_sysfs_unregister(struct request_queue *q); + /* * Basic implementation of sparser bitmap, allowing the user to spread * the bits over more cachelines. diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ad3adb73cc70..c15128833100 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -130,6 +130,7 @@ enum { BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_TAG_SHARED = 1 << 2, BLK_MQ_F_SG_MERGE = 1 << 3, + BLK_MQ_F_SYSFS_UP = 1 << 4, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, -- cgit v1.2.3 From c893d133eaccdda2516a3e71cd05a7dac2e14b00 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Fri, 30 May 2014 11:01:03 +0800 Subject: PCI: Make pci_bus_add_device() void pci_bus_add_device() always returns 0, so there's no point in returning anything at all. Make it a void function and remove the tests of the return value from the callers. 
[bhelgaas: changelog, remove unused "err" from i82875p_setup_overfl_dev()] Signed-off-by: Yijing Wang Signed-off-by: Bjorn Helgaas --- drivers/edac/i82875p_edac.c | 8 +------- drivers/pci/bus.c | 10 ++-------- drivers/pci/iov.c | 2 +- drivers/platform/x86/asus-wmi.c | 3 +-- drivers/platform/x86/eeepc-laptop.c | 3 +-- include/linux/pci.h | 2 +- 6 files changed, 7 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 8d0450b9b9af..4009077c8839 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -275,7 +275,6 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, { struct pci_dev *dev; void __iomem *window; - int err; *ovrfl_pdev = NULL; *ovrfl_window = NULL; @@ -293,12 +292,7 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, if (dev == NULL) return 1; - err = pci_bus_add_device(dev); - if (err) { - i82875p_printk(KERN_ERR, - "%s(): pci_bus_add_device() Failed\n", - __func__); - } + pci_bus_add_device(dev); pci_bus_assign_resources(dev->bus); } diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ba2bf55a38df..447d393725e1 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -235,7 +235,7 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } * * This adds add sysfs entries and start device drivers */ -int pci_bus_add_device(struct pci_dev *dev) +void pci_bus_add_device(struct pci_dev *dev) { int retval; @@ -252,8 +252,6 @@ int pci_bus_add_device(struct pci_dev *dev) WARN_ON(retval < 0); dev->is_added = 1; - - return 0; } /** @@ -266,16 +264,12 @@ void pci_bus_add_devices(const struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child; - int retval; list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ if (dev->is_added) continue; - retval = pci_bus_add_device(dev); - if (retval) - dev_err(&dev->dev, "Error adding device (%d)\n", - retval); + pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index de7a74782f92..cb6f24740ee3 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -106,7 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); mutex_unlock(&iov->dev->sriov->lock); - rc = pci_bus_add_device(virtfn); + pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index c5e082fb82fa..91ef69a52263 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -642,8 +642,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 399e8c562192..9b0c57cd1d4a 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -633,8 +633,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/include/linux/pci.h 
b/include/linux/pci.h index 322335aaa7e1..785149a6aec1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -764,7 +764,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn); struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); unsigned int pci_scan_child_bus(struct pci_bus *bus); -int __must_check pci_bus_add_device(struct pci_dev *dev); +void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); -- cgit v1.2.3 From b13460b92093b29347e99d6c3242e350052b62cd Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Fri, 30 May 2014 11:35:54 -0600 Subject: drivers/vfio: Rework offsetofend() The macro offsetofend() introduces unnecessary temporary variable "tmp". The patch avoids that and saves a bit memory in stack. Signed-off-by: Gavin Shan Signed-off-by: Alex Williamson --- include/linux/vfio.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 81022a52bc34..8ec980b5e3af 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -86,9 +86,8 @@ extern void vfio_unregister_iommu_driver( * from user space. This allows us to easily determine if the provided * structure is sized to include various fields. */ -#define offsetofend(TYPE, MEMBER) ({ \ - TYPE tmp; \ - offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \ +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) /* * External user API -- cgit v1.2.3 From 8c3a05b489ef097f86bf87c64192456553f57781 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Tue, 20 May 2014 06:45:54 +0200 Subject: mmc: mmci: Enforce DMA configuration through DT Remove the option to provide DMA configuration as platform data, enforce it through DT. 
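Once the platform-data hook is gone, the DMA channels are expected to be described in the device tree and looked up through the dmaengine core. A minimal sketch of that style of lookup is shown here; the channel names and the helper are generic dmaengine usage, not a description of the exact mmci code path:

    #include <linux/dmaengine.h>

    /* Sketch: obtain DT-described DMA channels; either may be absent. */
    static void sketch_dma_setup(struct device *dev,
                                 struct dma_chan **rx, struct dma_chan **tx)
    {
            *rx = dma_request_slave_channel(dev, "rx");
            *tx = dma_request_slave_channel(dev, "tx");
            /* A NULL channel simply means the DT describes none; the driver
             * can then fall back to PIO instead of failing the probe. */
    }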
Signed-off-by: Ulf Hansson Cc: Russell King Cc: Roland Stigge Acked-by: Arnd Bergmann --- arch/arm/mach-lpc32xx/phy3250.c | 3 --- drivers/mmc/host/mmci.c | 24 +----------------------- include/linux/amba/mmci.h | 17 ----------------- 3 files changed, 1 insertion(+), 43 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index 34932e0e31fa..7858d5b6f6ce 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -202,9 +202,6 @@ static struct mmci_platform_data lpc32xx_mmci_data = { .ocr_mask = MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34, .ios_handler = mmc_handle_ios, - .dma_filter = NULL, - /* No DMA for now since AMBA PL080 dmaengine driver only does scatter - * gather, and the MMCI driver doesn't do it this way */ }; static struct lpc32xx_slc_platform_data lpc32xx_slc_data = { diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 758efea184c9..a084edd37af5 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -366,7 +366,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) #ifdef CONFIG_DMA_ENGINE static void mmci_dma_setup(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; const char *rxname, *txname; dma_cap_mask_t mask; @@ -380,25 +379,6 @@ static void mmci_dma_setup(struct mmci_host *host) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - if (plat && plat->dma_filter) { - if (!host->dma_rx_channel && plat->dma_rx_param) { - host->dma_rx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_rx_param); - /* E.g if no DMA hardware is present */ - if (!host->dma_rx_channel) - dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); - } - - if (!host->dma_tx_channel && plat->dma_tx_param) { - host->dma_tx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_tx_param); - if (!host->dma_tx_channel) - dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); - } - } - /* * If only an RX channel is specified, the driver will * attempt to use it bidirectionally, however if it is @@ -446,11 +426,9 @@ static void mmci_dma_setup(struct mmci_host *host) */ static inline void mmci_dma_release(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; - if (host->dma_rx_channel) dma_release_channel(host->dma_rx_channel); - if (host->dma_tx_channel && plat->dma_tx_param) + if (host->dma_tx_channel) dma_release_channel(host->dma_tx_channel); host->dma_rx_channel = host->dma_tx_channel = NULL; } diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 3f95d32d5277..8c98113069ce 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -6,9 +6,6 @@ #include -/* Just some dummy forwarding */ -struct dma_chan; - /** * struct mmci_platform_data - platform configuration for the MMCI * (also known as PL180) block. @@ -26,17 +23,6 @@ struct dma_chan; * @gpio_wp: read this GPIO pin to see if the card is write protected * @gpio_cd: read this GPIO pin to detect card insertion * @cd_invert: true if the gpio_cd pin value is active low - * @dma_filter: function used to select an appropriate RX and TX - * DMA channel to be used for DMA, if and only if you're deploying the - * generic DMA engine - * @dma_rx_param: parameter passed to the DMA allocation - * filter in order to select an appropriate RX channel. 
If - * there is a bidirectional RX+TX channel, then just specify - * this and leave dma_tx_param set to NULL - * @dma_tx_param: parameter passed to the DMA allocation - * filter in order to select an appropriate TX channel. If this - * is NULL the driver will attempt to use the RX channel as a - * bidirectional channel */ struct mmci_platform_data { unsigned int ocr_mask; @@ -45,9 +31,6 @@ struct mmci_platform_data { int gpio_wp; int gpio_cd; bool cd_invert; - bool (*dma_filter)(struct dma_chan *chan, void *filter_param); - void *dma_rx_param; - void *dma_tx_param; }; #endif -- cgit v1.2.3 From 9c5de2c1754c2bb3c69c4d7bf0d0edc0a61d8232 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 2 Jun 2014 15:38:05 +0200 Subject: spi: rspi: Remove unused 16-bit DMA support The 16-bit DMA support doesn't fit well within the SPI core DMA framework, as it needs to manage its own double-sized temporary buffers, for handling the interleaved data. Remove it, as there is no in-tree board code that sets rspi_plat_data.dma_width_16bit. Signed-off-by: Geert Uytterhoeven Signed-off-by: Mark Brown --- drivers/spi/spi-rspi.c | 84 ++++-------------------------------------------- include/linux/spi/rspi.h | 2 -- 2 files changed, 6 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 57beda209599..3bd06fd9af47 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -201,7 +201,6 @@ struct rspi_data { struct dma_chan *chan_tx; struct dma_chan *chan_rx; - unsigned dma_width_16bit:1; unsigned dma_callbacked:1; unsigned byte_access:1; }; @@ -475,60 +474,17 @@ static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan, dma_unmap_sg(chan->device->dev, sg, 1, dir); } -static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len) -{ - u16 *dst = buf; - const u8 *src = data; - - while (len) { - *dst++ = (u16)(*src++); - len--; - } -} - -static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len) -{ - u8 *dst = buf; - const u16 *src = data; - - while (len) { - *dst++ = (u8)*src++; - len--; - } -} - static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) { struct scatterlist sg; - const void *buf = NULL; + const void *buf = t->tx_buf; struct dma_async_tx_descriptor *desc; - unsigned int len; + unsigned int len = t->len; int ret = 0; - if (rspi->dma_width_16bit) { - void *tmp; - /* - * If DMAC bus width is 16-bit, the driver allocates a dummy - * buffer. And, the driver converts original data into the - * DMAC data as the following format: - * original data: 1st byte, 2nd byte ... - * DMAC data: 1st byte, dummy, 2nd byte, dummy ... 
- */ - len = t->len * 2; - tmp = kmalloc(len, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - rspi_memory_to_8bit(tmp, t->tx_buf, t->len); - buf = tmp; - } else { - len = t->len; - buf = t->tx_buf; - } + if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) + return -EFAULT; - if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) { - ret = -EFAULT; - goto end_nomap; - } desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { @@ -563,10 +519,6 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) end: rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE); -end_nomap: - if (rspi->dma_width_16bit) - kfree(buf); - return ret; } @@ -603,28 +555,11 @@ static void qspi_receive_init(const struct rspi_data *rspi) static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t) { struct scatterlist sg, sg_dummy; - void *dummy = NULL, *rx_buf = NULL; + void *dummy = NULL, *rx_buf = t->rx_buf; struct dma_async_tx_descriptor *desc, *desc_dummy; - unsigned int len; + unsigned int len = t->len; int ret = 0; - if (rspi->dma_width_16bit) { - /* - * If DMAC bus width is 16-bit, the driver allocates a dummy - * buffer. And, finally the driver converts the DMAC data into - * actual data as the following format: - * DMAC data: 1st byte, dummy, 2nd byte, dummy ... - * actual data: 1st byte, 2nd byte ... - */ - len = t->len * 2; - rx_buf = kmalloc(len, GFP_KERNEL); - if (!rx_buf) - return -ENOMEM; - } else { - len = t->len; - rx_buf = t->rx_buf; - } - /* prepare dummy transfer to generate SPI clocks */ dummy = kzalloc(len, GFP_KERNEL); if (!dummy) { @@ -697,11 +632,6 @@ end: end_dummy_mapped: rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE); end_nomap: - if (rspi->dma_width_16bit) { - if (!ret) - rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len); - kfree(rx_buf); - } kfree(dummy); return ret; @@ -1073,8 +1003,6 @@ static int rspi_request_dma(struct rspi_data *rspi, if (!res || !rspi_pd) return 0; /* The driver assumes no error. */ - rspi->dma_width_16bit = rspi_pd->dma_width_16bit; - /* If the module receives data by DMAC, it also needs TX DMAC */ if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { dma_cap_zero(mask); diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h index a25bd6f65e7f..e546b2ceb623 100644 --- a/include/linux/spi/rspi.h +++ b/include/linux/spi/rspi.h @@ -25,8 +25,6 @@ struct rspi_plat_data { unsigned int dma_tx_id; unsigned int dma_rx_id; - unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */ - u16 num_chipselect; }; -- cgit v1.2.3 From 58a9e5b98360e8dcf9c958c0552fb35279e3933f Mon Sep 17 00:00:00 2001 From: Michael Brunner Date: Tue, 8 Apr 2014 08:21:06 +0200 Subject: mfd: Add sysfs attributes for Kontron PLD firmware revision This patch adds attributes to the Kontron PLD driver to allow applications to retrieve firmware information. Additionally the format has been changed to conform with the representation in other Kontron software. 
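Because the firmware information is exposed through plain sysfs attributes, applications can pick it up with ordinary file I/O. A small userspace sketch follows; the sysfs path is only an example, since the actual location depends on how the kempld platform device is enumerated on a given board:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            /* Hypothetical path to the attribute added by this patch. */
            FILE *f = fopen("/sys/devices/platform/kempld/pld_version", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("PLD firmware: %s", buf);   /* e.g. "P0A1.0042" */
            fclose(f);
            return 0;
    }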
Signed-off-by: Michael Brunner Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- drivers/mfd/kempld-core.c | 127 +++++++++++++++++++++++++++++++++++++-------- include/linux/mfd/kempld.h | 4 ++ 2 files changed, 110 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 07692604e119..25c5ca6797da 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -288,9 +288,38 @@ EXPORT_SYMBOL_GPL(kempld_release_mutex); */ static int kempld_get_info(struct kempld_device_data *pld) { + int ret; struct kempld_platform_data *pdata = dev_get_platdata(pld->dev); + char major, minor; + + ret = pdata->get_info(pld); + if (ret) + return ret; + + /* The Kontron PLD firmware version string has the following format: + * Pwxy.zzzz + * P: Fixed + * w: PLD number - 1 hex digit + * x: Major version - 1 alphanumerical digit (0-9A-V) + * y: Minor version - 1 alphanumerical digit (0-9A-V) + * zzzz: Build number - 4 zero padded hex digits */ - return pdata->get_info(pld); + if (pld->info.major < 10) + major = pld->info.major + '0'; + else + major = (pld->info.major - 10) + 'A'; + if (pld->info.minor < 10) + minor = pld->info.minor + '0'; + else + minor = (pld->info.minor - 10) + 'A'; + + ret = scnprintf(pld->info.version, sizeof(pld->info.version), + "P%X%c%c.%04X", pld->info.number, major, minor, + pld->info.buildnr); + if (ret < 0) + return ret; + + return 0; } /* @@ -307,9 +336,71 @@ static int kempld_register_cells(struct kempld_device_data *pld) return pdata->register_cells(pld); } +static const char *kempld_get_type_string(struct kempld_device_data *pld) +{ + const char *version_type; + + switch (pld->info.type) { + case 0: + version_type = "release"; + break; + case 1: + version_type = "debug"; + break; + case 2: + version_type = "custom"; + break; + default: + version_type = "unspecified"; + break; + } + + return version_type; +} + +static ssize_t kempld_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kempld_device_data *pld = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", pld->info.version); +} + +static ssize_t kempld_specification_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kempld_device_data *pld = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d.%d\n", pld->info.spec_major, + pld->info.spec_minor); +} + +static ssize_t kempld_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kempld_device_data *pld = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", kempld_get_type_string(pld)); +} + +static DEVICE_ATTR(pld_version, S_IRUGO, kempld_version_show, NULL); +static DEVICE_ATTR(pld_specification, S_IRUGO, kempld_specification_show, + NULL); +static DEVICE_ATTR(pld_type, S_IRUGO, kempld_type_show, NULL); + +static struct attribute *pld_attributes[] = { + &dev_attr_pld_version.attr, + &dev_attr_pld_specification.attr, + &dev_attr_pld_type.attr, + NULL +}; + +static const struct attribute_group pld_attr_group = { + .attrs = pld_attributes, +}; + static int kempld_detect_device(struct kempld_device_data *pld) { - char *version_type; u8 index_reg; int ret; @@ -335,27 +426,19 @@ static int kempld_detect_device(struct kempld_device_data *pld) if (ret) return ret; - switch (pld->info.type) { - case 0: - version_type = "release"; - break; - case 1: - version_type = "debug"; - break; - case 2: - version_type = "custom"; - break; - default: - version_type = 
"unspecified"; - } + dev_info(pld->dev, "Found Kontron PLD - %s (%s), spec %d.%d\n", + pld->info.version, kempld_get_type_string(pld), + pld->info.spec_major, pld->info.spec_minor); + + ret = sysfs_create_group(&pld->dev->kobj, &pld_attr_group); + if (ret) + return ret; - dev_info(pld->dev, "Found Kontron PLD %d\n", pld->info.number); - dev_info(pld->dev, "%s version %d.%d build %d, specification %d.%d\n", - version_type, pld->info.major, pld->info.minor, - pld->info.buildnr, pld->info.spec_major, - pld->info.spec_minor); + ret = kempld_register_cells(pld); + if (ret) + sysfs_remove_group(&pld->dev->kobj, &pld_attr_group); - return kempld_register_cells(pld); + return ret; } static int kempld_probe(struct platform_device *pdev) @@ -399,6 +482,8 @@ static int kempld_remove(struct platform_device *pdev) struct kempld_device_data *pld = platform_get_drvdata(pdev); struct kempld_platform_data *pdata = dev_get_platdata(pld->dev); + sysfs_remove_group(&pld->dev->kobj, &pld_attr_group); + mfd_remove_devices(&pdev->dev); pdata->release_hardware_mutex(pld); diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h index b911ef3add03..26e0b469e567 100644 --- a/include/linux/mfd/kempld.h +++ b/include/linux/mfd/kempld.h @@ -51,6 +51,8 @@ #define KEMPLD_TYPE_DEBUG 0x1 #define KEMPLD_TYPE_CUSTOM 0x2 +#define KEMPLD_VERSION_LEN 10 + /** * struct kempld_info - PLD device information structure * @major: PLD major revision @@ -60,6 +62,7 @@ * @type: PLD type * @spec_major: PLD FW specification major revision * @spec_minor: PLD FW specification minor revision + * @version: PLD version string */ struct kempld_info { unsigned int major; @@ -69,6 +72,7 @@ struct kempld_info { unsigned int type; unsigned int spec_major; unsigned int spec_minor; + char version[KEMPLD_VERSION_LEN]; }; /** -- cgit v1.2.3 From 7abafa0a66414e385d122bcbc655a1d55ecbaecf Mon Sep 17 00:00:00 2001 From: Jay Aurabind Date: Wed, 21 May 2014 22:49:54 +0530 Subject: mfd: abx500-core: Fix compiler warning larger stack frame MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On systems with CONFIG_FRAME_WARN=1024, compiler warns the allocation of an object of struct device on stack. Make the allocation dynamically to fix the warning. Also change the caller's return type to int so as to account for error handling. 
drivers/mfd/abx500-core.c: In function ‘abx500_dump_all_banks’: drivers/mfd/abx500-core.c:167:1: warning: the frame size of 1032 bytes is larger than 1024 bytes [-Wframe-larger-than=] Signed-off-by: Aurabindo J Signed-off-by: Lee Jones --- drivers/mfd/abx500-core.c | 12 ++++++++---- include/linux/mfd/abx500.h | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c index f3a15aa54d7b..d6d0ec4d21e4 100644 --- a/drivers/mfd/abx500-core.c +++ b/drivers/mfd/abx500-core.c @@ -151,19 +151,23 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq) } EXPORT_SYMBOL(abx500_startup_irq_enabled); -void abx500_dump_all_banks(void) +int abx500_dump_all_banks(void) { struct abx500_ops *ops; - struct device dummy_child = {NULL}; + struct device *dummy_child; struct abx500_device_entry *dev_entry; + dummy_child = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dummy_child) + return -ENOMEM; list_for_each_entry(dev_entry, &abx500_list, list) { - dummy_child.parent = dev_entry->dev; + dummy_child->parent = dev_entry->dev; ops = &dev_entry->ops; if ((ops != NULL) && (ops->dump_all_banks != NULL)) - ops->dump_all_banks(&dummy_child); + ops->dump_all_banks(dummy_child); } + kfree(dummy_child); } EXPORT_SYMBOL(abx500_dump_all_banks); diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 3301b2031c8d..df2508f7f3d2 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -330,7 +330,7 @@ int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, int abx500_get_chip_id(struct device *dev); int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); int abx500_startup_irq_enabled(struct device *dev, unsigned int irq); -void abx500_dump_all_banks(void); +int abx500_dump_all_banks(void); struct abx500_ops { int (*get_chip_id) (struct device *); -- cgit v1.2.3 From d09b711a31ed891dc372039ccd347cdc8402da04 Mon Sep 17 00:00:00 2001 From: Alexander Shiyan Date: Wed, 16 Apr 2014 10:13:33 +0400 Subject: mfd: mc13xxx: Move definitions out of structures Signed-off-by: Alexander Shiyan Signed-off-by: Lee Jones --- include/linux/mfd/mc13xxx.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h index a326c850f046..d63b1d309106 100644 --- a/include/linux/mfd/mc13xxx.h +++ b/include/linux/mfd/mc13xxx.h @@ -117,10 +117,6 @@ struct mc13xxx_led_platform_data { #define MAX_LED_CONTROL_REGS 6 -struct mc13xxx_leds_platform_data { - struct mc13xxx_led_platform_data *led; - int num_leds; - /* MC13783 LED Control 0 */ #define MC13783_LED_C0_ENABLE (1 << 0) #define MC13783_LED_C0_TRIODE_MD (1 << 7) @@ -169,10 +165,13 @@ struct mc13xxx_leds_platform_data { /* MC34708 LED Control 0 */ #define MC34708_LED_C0_CURRENT_R(x) (((x) & 0x3) << 9) #define MC34708_LED_C0_CURRENT_G(x) (((x) & 0x3) << 21) + +struct mc13xxx_leds_platform_data { + struct mc13xxx_led_platform_data *led; + int num_leds; u32 led_control[MAX_LED_CONTROL_REGS]; }; -struct mc13xxx_buttons_platform_data { #define MC13783_BUTTON_DBNC_0MS 0 #define MC13783_BUTTON_DBNC_30MS 1 #define MC13783_BUTTON_DBNC_150MS 2 @@ -180,6 +179,8 @@ struct mc13xxx_buttons_platform_data { #define MC13783_BUTTON_ENABLE (1 << 2) #define MC13783_BUTTON_POL_INVERT (1 << 3) #define MC13783_BUTTON_RESET_EN (1 << 4) + +struct mc13xxx_buttons_platform_data { int b1on_flags; unsigned short b1on_key; int 
b2on_flags; @@ -188,14 +189,14 @@ struct mc13xxx_buttons_platform_data { unsigned short b3on_key; }; +#define MC13783_TS_ATO_FIRST false +#define MC13783_TS_ATO_EACH true + struct mc13xxx_ts_platform_data { /* Delay between Touchscreen polarization and ADC Conversion. * Given in clock ticks of a 32 kHz clock which gives a granularity of * about 30.5ms */ u8 ato; - -#define MC13783_TS_ATO_FIRST false -#define MC13783_TS_ATO_EACH true /* Use the ATO delay only for the first conversion or for each one */ bool atox; }; @@ -210,11 +211,12 @@ struct mc13xxx_codec_platform_data { enum mc13783_ssi_port dac_ssi_port; }; -struct mc13xxx_platform_data { -#define MC13XXX_USE_TOUCHSCREEN (1 << 0) +#define MC13XXX_USE_TOUCHSCREEN (1 << 0) #define MC13XXX_USE_CODEC (1 << 1) #define MC13XXX_USE_ADC (1 << 2) #define MC13XXX_USE_RTC (1 << 3) + +struct mc13xxx_platform_data { unsigned int flags; struct mc13xxx_regulator_platform_data regulators; -- cgit v1.2.3 From 3176a521922b8ebcf7a593063cc55344486d2cd7 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 15 Apr 2014 19:40:09 +0800 Subject: mfd: tps65218: Remove unused *rdev[] from struct tps65218 The *rdev[] is not used since commit 413be59e2f333 "regulator: tps65218: Remove unnecessary regulator_unregister call". Signed-off-by: Axel Lin Signed-off-by: Lee Jones --- include/linux/mfd/tps65218.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index d2e357df5a0e..2f9b593246ee 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -267,7 +267,6 @@ struct tps65218 { u32 irq_mask; struct regmap_irq_chip_data *irq_data; struct regulator_desc desc[TPS65218_NUM_REGULATOR]; - struct regulator_dev *rdev[TPS65218_NUM_REGULATOR]; struct tps_info *info[TPS65218_NUM_REGULATOR]; struct regmap *regmap; }; -- cgit v1.2.3 From e349c910e2398cbff59d7c58851503191a8e9157 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 14 Apr 2014 09:40:45 +0200 Subject: mfd/rtc: s5m: Do not allocate RTC I2C dummy and regmap for unsupported chipsets The rtc-s5m driver does not support all of S2M and S5M chipsets supported by main MFD sec-core driver. For such chipsets unsupported by rtc-s5m, the MFD sec-core driver initialized regmap with default config. This config in such cases wouldn't work at all. The main MFD sec-core driver shouldn't initialize regmap for child drivers which is not used by them and even not valid. Move the allocation of RTC I2C dummy device and initialization of RTC regmap from main MFD sec-core driver to the rtc-s5m driver. The rtc-s5m driver will use proper regmap config for supported devices. 
Signed-off-by: Krzysztof Kozlowski Acked-by: Alessandro Zummo Signed-off-by: Lee Jones --- drivers/mfd/sec-core.c | 53 +--------------------------- drivers/rtc/rtc-s5m.c | 75 +++++++++++++++++++++++++++++++++++++--- include/linux/mfd/samsung/core.h | 3 -- 3 files changed, 71 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index 1cf27521fff4..d4682c6cbff5 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -196,20 +195,6 @@ static const struct regmap_config s5m8767_regmap_config = { .cache_type = REGCACHE_FLAT, }; -static const struct regmap_config s5m_rtc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - - .max_register = SEC_RTC_REG_MAX, -}; - -static const struct regmap_config s2mps14_rtc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - - .max_register = S2MPS_RTC_REG_MAX, -}; - #ifdef CONFIG_OF /* * Only the common platform data elements for s5m8767 are parsed here from the @@ -264,7 +249,7 @@ static int sec_pmic_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev); - const struct regmap_config *regmap, *regmap_rtc; + const struct regmap_config *regmap; struct sec_pmic_dev *sec_pmic; int ret; @@ -298,39 +283,21 @@ static int sec_pmic_probe(struct i2c_client *i2c, switch (sec_pmic->device_type) { case S2MPA01: regmap = &s2mpa01_regmap_config; - /* - * The rtc-s5m driver does not support S2MPA01 and there - * is no mfd_cell for S2MPA01 RTC device. - * However we must pass something to devm_regmap_init_i2c() - * so use S5M-like regmap config even though it wouldn't work. - */ - regmap_rtc = &s5m_rtc_regmap_config; break; case S2MPS11X: regmap = &s2mps11_regmap_config; - /* - * The rtc-s5m driver does not support S2MPS11 and there - * is no mfd_cell for S2MPS11 RTC device. - * However we must pass something to devm_regmap_init_i2c() - * so use S5M-like regmap config even though it wouldn't work. 
- */ - regmap_rtc = &s5m_rtc_regmap_config; break; case S2MPS14X: regmap = &s2mps14_regmap_config; - regmap_rtc = &s2mps14_rtc_regmap_config; break; case S5M8763X: regmap = &s5m8763_regmap_config; - regmap_rtc = &s5m_rtc_regmap_config; break; case S5M8767X: regmap = &s5m8767_regmap_config; - regmap_rtc = &s5m_rtc_regmap_config; break; default: regmap = &sec_regmap_config; - regmap_rtc = &s5m_rtc_regmap_config; break; } @@ -342,21 +309,6 @@ static int sec_pmic_probe(struct i2c_client *i2c, return ret; } - sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); - if (!sec_pmic->rtc) { - dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n"); - return -ENODEV; - } - i2c_set_clientdata(sec_pmic->rtc, sec_pmic); - - sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, regmap_rtc); - if (IS_ERR(sec_pmic->regmap_rtc)) { - ret = PTR_ERR(sec_pmic->regmap_rtc); - dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n", - ret); - goto err_regmap_rtc; - } - if (pdata && pdata->cfg_pmic_irq) pdata->cfg_pmic_irq(); @@ -403,8 +355,6 @@ static int sec_pmic_probe(struct i2c_client *i2c, err_mfd: sec_irq_exit(sec_pmic); -err_regmap_rtc: - i2c_unregister_device(sec_pmic->rtc); return ret; } @@ -414,7 +364,6 @@ static int sec_pmic_remove(struct i2c_client *i2c) mfd_remove_devices(sec_pmic->dev); sec_irq_exit(sec_pmic); - i2c_unregister_device(sec_pmic->rtc); return 0; } diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index 476af93543f6..8ec2d6a1dbe1 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -40,6 +40,7 @@ struct s5m_rtc_info { struct device *dev; + struct i2c_client *i2c; struct sec_pmic_dev *s5m87xx; struct regmap *regmap; struct rtc_device *rtc_dev; @@ -49,6 +50,20 @@ struct s5m_rtc_info { bool wtsr_smpl; }; +static const struct regmap_config s5m_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = SEC_RTC_REG_MAX, +}; + +static const struct regmap_config s2mps14_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = S2MPS_RTC_REG_MAX, +}; + static void s5m8767_data_to_tm(u8 *data, struct rtc_time *tm, int rtc_24hr_mode) { @@ -554,6 +569,7 @@ static int s5m_rtc_probe(struct platform_device *pdev) struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent); struct sec_platform_data *pdata = s5m87xx->pdata; struct s5m_rtc_info *info; + const struct regmap_config *regmap_cfg; int ret; if (!pdata) { @@ -565,9 +581,37 @@ static int s5m_rtc_probe(struct platform_device *pdev) if (!info) return -ENOMEM; + switch (pdata->device_type) { + case S2MPS14X: + regmap_cfg = &s2mps14_rtc_regmap_config; + break; + case S5M8763X: + regmap_cfg = &s5m_rtc_regmap_config; + break; + case S5M8767X: + regmap_cfg = &s5m_rtc_regmap_config; + break; + default: + dev_err(&pdev->dev, "Device type is not supported by RTC driver\n"); + return -ENODEV; + } + + info->i2c = i2c_new_dummy(s5m87xx->i2c->adapter, RTC_I2C_ADDR); + if (!info->i2c) { + dev_err(&pdev->dev, "Failed to allocate I2C for RTC\n"); + return -ENODEV; + } + + info->regmap = devm_regmap_init_i2c(info->i2c, regmap_cfg); + if (IS_ERR(info->regmap)) { + ret = PTR_ERR(info->regmap); + dev_err(&pdev->dev, "Failed to allocate RTC register map: %d\n", + ret); + goto err; + } + info->dev = &pdev->dev; info->s5m87xx = s5m87xx; - info->regmap = s5m87xx->regmap_rtc; info->device_type = s5m87xx->device_type; info->wtsr_smpl = s5m87xx->wtsr_smpl; @@ -585,7 +629,7 @@ static int s5m_rtc_probe(struct platform_device *pdev) default: ret = -EINVAL; dev_err(&pdev->dev, "Unsupported device 
type: %d\n", ret); - return ret; + goto err; } platform_set_drvdata(pdev, info); @@ -602,15 +646,24 @@ static int s5m_rtc_probe(struct platform_device *pdev) info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc", &s5m_rtc_ops, THIS_MODULE); - if (IS_ERR(info->rtc_dev)) - return PTR_ERR(info->rtc_dev); + if (IS_ERR(info->rtc_dev)) { + ret = PTR_ERR(info->rtc_dev); + goto err; + } ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, s5m_rtc_alarm_irq, 0, "rtc-alarm0", info); - if (ret < 0) + if (ret < 0) { dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", info->irq, ret); + goto err; + } + + return 0; + +err: + i2c_unregister_device(info->i2c); return ret; } @@ -639,6 +692,17 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) s5m_rtc_enable_smpl(info, false); } +static int s5m_rtc_remove(struct platform_device *pdev) +{ + struct s5m_rtc_info *info = platform_get_drvdata(pdev); + + /* Perform also all shutdown steps when removing */ + s5m_rtc_shutdown(pdev); + i2c_unregister_device(info->i2c); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int s5m_rtc_resume(struct device *dev) { @@ -676,6 +740,7 @@ static struct platform_driver s5m_rtc_driver = { .pm = &s5m_rtc_pm_ops, }, .probe = s5m_rtc_probe, + .remove = s5m_rtc_remove, .shutdown = s5m_rtc_shutdown, .id_table = s5m_rtc_id, }; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 157e32b6ca28..84aaf6c25794 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -28,7 +28,6 @@ enum sec_device_type { * @dev: master device of the chip (can be used to access platform data) * @pdata: pointer to private data used to pass platform data to child * @i2c: i2c client private data for regulator - * @rtc: i2c client private data for rtc * @iolock: mutex for serializing io access * @irqlock: mutex for buslock * @irq_base: base IRQ number for sec-pmic, required for IRQs @@ -42,9 +41,7 @@ struct sec_pmic_dev { struct device *dev; struct sec_platform_data *pdata; struct regmap *regmap_pmic; - struct regmap *regmap_rtc; struct i2c_client *i2c; - struct i2c_client *rtc; int device_type; int irq_base; -- cgit v1.2.3 From 51f1f1cb24b870db44edcab56ffd89ecf8ce09e8 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 24 Apr 2014 10:05:38 +0200 Subject: mfd: sec-core: Update sec_pmic documentation Update the documentation for sec_pmic state container structure to reflect current code. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- include/linux/mfd/samsung/core.h | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 84aaf6c25794..1c66a6462887 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -24,18 +24,23 @@ enum sec_device_type { }; /** - * struct sec_pmic_dev - s5m87xx master device for sub-drivers - * @dev: master device of the chip (can be used to access platform data) - * @pdata: pointer to private data used to pass platform data to child - * @i2c: i2c client private data for regulator - * @iolock: mutex for serializing io access - * @irqlock: mutex for buslock - * @irq_base: base IRQ number for sec-pmic, required for IRQs - * @irq: generic IRQ number for s5m87xx - * @ono: power onoff IRQ number for s5m87xx - * @irq_masks_cur: currently active value - * @irq_masks_cache: cached hardware value - * @type: indicate which s5m87xx "variant" is used + * struct sec_pmic_dev - s2m/s5m master device for sub-drivers + * @dev: Master device of the chip + * @pdata: Platform data populated with data from DTS + * or board files + * @regmap_pmic: Regmap associated with PMIC's I2C address + * @i2c: I2C client of the main driver + * @device_type: Type of device, matches enum sec_device_type + * @irq_base: Base IRQ number for device, required for IRQs + * @irq: Generic IRQ number for device + * @irq_data: Runtime data structure for IRQ controller + * @ono: Power onoff IRQ number for s5m87xx + * @wakeup: Whether or not this is a wakeup device + * @wtsr_smpl: Whether or not to enable in RTC driver the Watchdog + * Timer Software Reset (registers set to default value + * after PWRHOLD falling) and Sudden Momentary Power Loss + * (PMIC will enter power on sequence after short drop in + * VBATT voltage). */ struct sec_pmic_dev { struct device *dev; -- cgit v1.2.3 From 1ec93b9b176b3c4e065c326ccf40458fcc01e6c0 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Fri, 25 Apr 2014 09:24:21 +0800 Subject: mfd: rdc321x: Fix off-by-one for ngpio setting The valid gpio is GPIO0 ~ GPIO58, so ngpio should be 59. This patch also renames RDC321X_MAX_GPIO to RDC321X_NUM_GPIO because it actually means the number of available GPIOs. 
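The count is a simple inclusive range: lines GPIO0 through GPIO58 give 58 - 0 + 1 = 59 GPIOs, which is what the renamed constant now expresses:

    #define RDC321X_NUM_GPIO 59   /* GPIO0..GPIO58 inclusive */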
Signed-off-by: Axel Lin Acked-by: Linus Walleij Signed-off-by: Lee Jones --- drivers/mfd/rdc321x-southbridge.c | 2 +- include/linux/mfd/rdc321x.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c index c79569750be9..6575585f1d1f 100644 --- a/drivers/mfd/rdc321x-southbridge.c +++ b/drivers/mfd/rdc321x-southbridge.c @@ -38,7 +38,7 @@ static struct resource rdc321x_wdt_resource[] = { }; static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = { - .max_gpios = RDC321X_MAX_GPIO, + .max_gpios = RDC321X_NUM_GPIO, }; static struct resource rdc321x_gpio_resources[] = { diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h index 4bdf19c8eedf..442743a8f915 100644 --- a/include/linux/mfd/rdc321x.h +++ b/include/linux/mfd/rdc321x.h @@ -12,7 +12,7 @@ #define RDC321X_GPIO_CTRL_REG2 0x84 #define RDC321X_GPIO_DATA_REG2 0x88 -#define RDC321X_MAX_GPIO 58 +#define RDC321X_NUM_GPIO 59 struct rdc321x_gpio_pdata { struct pci_dev *sb_pdev; -- cgit v1.2.3 From 11e38e11afcdd598d0978746924a001e3e7cb723 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 1 Apr 2014 16:44:59 +0300 Subject: mfd: twl6040: Select i2c fast mode as default with regmap patch All boards using twl6040 configures the i2c bus to 400KHz. While twl6040's defaults to normal mode (100KHz). So far twl6040 has no problem with i2c communication in this configuration it is safer to select fast i2c mode. Signed-off-by: Peter Ujfalusi Signed-off-by: Lee Jones --- drivers/mfd/twl6040.c | 9 +++++++-- include/linux/mfd/twl6040.h | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index 03dbff3597a2..cb37bb8f8e41 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -87,8 +87,13 @@ static struct reg_default twl6040_defaults[] = { }; static struct reg_default twl6040_patch[] = { - /* Select I2C bus access to dual access registers */ - { TWL6040_REG_ACCCTL, 0x09 }, + /* + * Select I2C bus access to dual access registers + * Interrupt register is cleared on read + * Select fast mode for i2c (400KHz) + */ + { TWL6040_REG_ACCCTL, + TWL6040_I2CSEL | TWL6040_INTCLRMODE | TWL6040_I2CMODE(1) }, }; diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index 81f639bc1ae6..a69d16b30c18 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -157,6 +157,7 @@ #define TWL6040_I2CSEL 0x01 #define TWL6040_RESETSPLIT 0x04 #define TWL6040_INTCLRMODE 0x08 +#define TWL6040_I2CMODE(x) ((x & 0x3) << 4) /* STATUS (0x2E) fields */ -- cgit v1.2.3 From 68bab8662f49b9e158f1d32f11becd4e48c04079 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 3 Apr 2014 13:54:41 +0300 Subject: mfd: twl6040: Optional clk32k clock handling In certain boards the source for the clk32k clock can be gated. In these boards the clk32k clock can be provided to the driver and it is going to be enabled/disabled when it is needed. If the clk32k clock is not provided the driver will assume that it is always running. 
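This is the usual optional-clock pattern: if the lookup fails, the clock pointer is treated as absent and the enable/disable calls degrade to no-ops. A condensed sketch of the idea (the "clk32k" consumer name comes from the patch below; the rest is illustrative):

    clk = devm_clk_get(dev, "clk32k");
    if (IS_ERR(clk))
            clk = NULL;               /* no gateable clk32k on this board */
    /* ... later, around chip power-up/down ... */
    clk_prepare_enable(clk);          /* safe: a NULL clk is a no-op */
    clk_disable_unprepare(clk);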
Signed-off-by: Peter Ujfalusi Signed-off-by: Lee Jones --- Documentation/devicetree/bindings/mfd/twl6040.txt | 2 ++ drivers/mfd/twl6040.c | 10 ++++++++++ include/linux/mfd/twl6040.h | 2 ++ 3 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt index 0f5dd709d752..a41157b5d930 100644 --- a/Documentation/devicetree/bindings/mfd/twl6040.txt +++ b/Documentation/devicetree/bindings/mfd/twl6040.txt @@ -19,6 +19,8 @@ Required properties: Optional properties, nodes: - enable-active-high: To power on the twl6040 during boot. +- clocks: phandle to the clk32k clock provider +- clock-names: Must be "clk32k" Vibra functionality Required properties: diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index cb37bb8f8e41..574774d7f826 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -291,6 +291,8 @@ int twl6040_power(struct twl6040 *twl6040, int on) if (twl6040->power_count++) goto out; + clk_prepare_enable(twl6040->clk32k); + /* Allow writes to the chip */ regcache_cache_only(twl6040->regmap, false); @@ -346,6 +348,8 @@ int twl6040_power(struct twl6040 *twl6040, int on) twl6040->sysclk = 0; twl6040->mclk = 0; + + clk_disable_unprepare(twl6040->clk32k); } out: @@ -644,6 +648,12 @@ static int twl6040_probe(struct i2c_client *client, i2c_set_clientdata(client, twl6040); + twl6040->clk32k = devm_clk_get(&client->dev, "clk32k"); + if (IS_ERR(twl6040->clk32k)) { + dev_info(&client->dev, "clk32k is not handled\n"); + twl6040->clk32k = NULL; + } + twl6040->supplies[0].supply = "vio"; twl6040->supplies[1].supply = "v2v1"; ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES, diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index a69d16b30c18..8f9fc3d26e6d 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -28,6 +28,7 @@ #include #include #include +#include #define TWL6040_REG_ASICID 0x01 #define TWL6040_REG_ASICREV 0x02 @@ -223,6 +224,7 @@ struct twl6040 { struct regmap *regmap; struct regmap_irq_chip_data *irq_data; struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */ + struct clk *clk32k; struct mutex mutex; struct mutex irq_mutex; struct mfd_cell cells[TWL6040_CELLS]; -- cgit v1.2.3 From 9549b5ff001a8904372370d10be9a2f05e10eca5 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 23 Apr 2014 16:13:05 +0200 Subject: mfd: sec-core: Remove duplicated device type from sec_pmic_dev The device type was stored in sec_pmic_dev state container twice: - unsigned long type (initialized from of_device_id or i2c_device_id) - int device_type (initialized as above or from board files when there is no DTS) The 'type' field was never used outside of probe so it can be safely removed. Change also the device_type in sec_pmic_dev and sec_platform_data to unsigned long to avoid any casts. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- drivers/mfd/sec-core.c | 5 +++-- drivers/mfd/sec-irq.c | 2 +- include/linux/mfd/samsung/core.h | 3 +-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index d4682c6cbff5..09fd256abcf6 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -251,6 +251,7 @@ static int sec_pmic_probe(struct i2c_client *i2c, struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev); const struct regmap_config *regmap; struct sec_pmic_dev *sec_pmic; + unsigned long device_type; int ret; sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev), @@ -262,7 +263,7 @@ static int sec_pmic_probe(struct i2c_client *i2c, sec_pmic->dev = &i2c->dev; sec_pmic->i2c = i2c; sec_pmic->irq = i2c->irq; - sec_pmic->type = sec_i2c_get_driver_data(i2c, id); + device_type = sec_i2c_get_driver_data(i2c, id); if (sec_pmic->dev->of_node) { pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev); @@ -270,7 +271,7 @@ static int sec_pmic_probe(struct i2c_client *i2c, ret = PTR_ERR(pdata); return ret; } - pdata->device_type = sec_pmic->type; + pdata->device_type = device_type; } if (pdata) { sec_pmic->device_type = pdata->device_type; diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c index 64e7913aadc6..654e2c1dbf7a 100644 --- a/drivers/mfd/sec-irq.c +++ b/drivers/mfd/sec-irq.c @@ -385,7 +385,7 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) &sec_pmic->irq_data); break; default: - dev_err(sec_pmic->dev, "Unknown device type %d\n", + dev_err(sec_pmic->dev, "Unknown device type %lu\n", sec_pmic->device_type); return -EINVAL; } diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 1c66a6462887..47d84242940b 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -48,13 +48,12 @@ struct sec_pmic_dev { struct regmap *regmap_pmic; struct i2c_client *i2c; - int device_type; + unsigned long device_type; int irq_base; int irq; struct regmap_irq_chip_data *irq_data; int ono; - unsigned long type; bool wakeup; bool wtsr_smpl; }; -- cgit v1.2.3 From 3e87933a68dce6a27bf1006964f8c850e13140b5 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 8 Apr 2014 17:14:15 -0700 Subject: mfd: pm8921: Remove pm8xxx API now that sub-devices use regmap The pm8xxx read/write wrappers are no longer necessary now that all the sub-device drivers are using the regmap API. Remove it. 
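For illustration, a sub-device can reach the PMIC registers through the parent's regmap instead of the removed pm8xxx_readb()-style wrappers; this is a generic sketch of that pattern, not code taken from one of the Qualcomm sub-drivers.

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical helper: read one PMIC register via the parent's regmap. */
static int example_pmic_read_reg(struct platform_device *pdev,
				 unsigned int reg, unsigned int *val)
{
	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);

	if (!map)
		return -ENODEV;

	return regmap_read(map, reg, val);
}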
Signed-off-by: Stephen Boyd Signed-off-by: Lee Jones --- drivers/mfd/pm8921-core.c | 123 +--------------------------------------- include/linux/mfd/pm8xxx/core.h | 81 -------------------------- 2 files changed, 2 insertions(+), 202 deletions(-) delete mode 100644 include/linux/mfd/pm8xxx/core.h (limited to 'include/linux') diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index b97a97187ae9..959513803542 100644 --- a/drivers/mfd/pm8921-core.c +++ b/drivers/mfd/pm8921-core.c @@ -26,7 +26,6 @@ #include #include #include -#include #define SSBI_REG_ADDR_IRQ_BASE 0x1BB @@ -57,7 +56,6 @@ #define PM8921_NR_IRQS 256 struct pm_irq_chip { - struct device *dev; struct regmap *regmap; spinlock_t pm_irq_lock; struct irq_domain *irqdomain; @@ -67,11 +65,6 @@ struct pm_irq_chip { u8 config[0]; }; -struct pm8921 { - struct device *dev; - struct pm_irq_chip *irq_chip; -}; - static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int *ip) { @@ -255,55 +248,6 @@ static struct irq_chip pm8xxx_irq_chip = { .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; -/** - * pm8xxx_get_irq_stat - get the status of the irq line - * @chip: pointer to identify a pmic irq controller - * @irq: the irq number - * - * The pm8xxx gpio and mpp rely on the interrupt block to read - * the values on their pins. This function is to facilitate reading - * the status of a gpio or an mpp line. The caller has to convert the - * gpio number to irq number. - * - * RETURNS: - * an int indicating the value read on that line - */ -static int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq) -{ - int pmirq, rc; - unsigned int block, bits, bit; - unsigned long flags; - struct irq_data *irq_data = irq_get_irq_data(irq); - - pmirq = irq_data->hwirq; - - block = pmirq / 8; - bit = pmirq % 8; - - spin_lock_irqsave(&chip->pm_irq_lock, flags); - - rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block); - if (rc) { - pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n", - irq, pmirq, block, rc); - goto bail_out; - } - - rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits); - if (rc) { - pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n", - irq, pmirq, block, rc); - goto bail_out; - } - - rc = (bits & (1 << bit)) ? 
1 : 0; - -bail_out: - spin_unlock_irqrestore(&chip->pm_irq_lock, flags); - - return rc; -} - static int pm8xxx_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { @@ -324,56 +268,6 @@ static const struct irq_domain_ops pm8xxx_irq_domain_ops = { .map = pm8xxx_irq_domain_map, }; -static int pm8921_readb(const struct device *dev, u16 addr, u8 *val) -{ - const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); - const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; - - return ssbi_read(pmic->dev->parent, addr, val, 1); -} - -static int pm8921_writeb(const struct device *dev, u16 addr, u8 val) -{ - const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); - const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; - - return ssbi_write(pmic->dev->parent, addr, &val, 1); -} - -static int pm8921_read_buf(const struct device *dev, u16 addr, u8 *buf, - int cnt) -{ - const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); - const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; - - return ssbi_read(pmic->dev->parent, addr, buf, cnt); -} - -static int pm8921_write_buf(const struct device *dev, u16 addr, u8 *buf, - int cnt) -{ - const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); - const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; - - return ssbi_write(pmic->dev->parent, addr, buf, cnt); -} - -static int pm8921_read_irq_stat(const struct device *dev, int irq) -{ - const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); - const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; - - return pm8xxx_get_irq_stat(pmic->irq_chip, irq); -} - -static struct pm8xxx_drvdata pm8921_drvdata = { - .pmic_readb = pm8921_readb, - .pmic_writeb = pm8921_writeb, - .pmic_read_buf = pm8921_read_buf, - .pmic_write_buf = pm8921_write_buf, - .pmic_read_irq_stat = pm8921_read_irq_stat, -}; - static const struct regmap_config ssbi_regmap_config = { .reg_bits = 16, .val_bits = 8, @@ -392,7 +286,6 @@ MODULE_DEVICE_TABLE(of, pm8921_id_table); static int pm8921_probe(struct platform_device *pdev) { - struct pm8921 *pmic; struct regmap *regmap; int irq, rc; unsigned int val; @@ -404,12 +297,6 @@ static int pm8921_probe(struct platform_device *pdev) if (irq < 0) return irq; - pmic = devm_kzalloc(&pdev->dev, sizeof(struct pm8921), GFP_KERNEL); - if (!pmic) { - pr_err("Cannot alloc pm8921 struct\n"); - return -ENOMEM; - } - regmap = devm_regmap_init(&pdev->dev, NULL, pdev->dev.parent, &ssbi_regmap_config); if (IS_ERR(regmap)) @@ -434,18 +321,13 @@ static int pm8921_probe(struct platform_device *pdev) pr_info("PMIC revision 2: %02X\n", val); rev |= val << BITS_PER_BYTE; - pmic->dev = &pdev->dev; - pm8921_drvdata.pm_chip_data = pmic; - platform_set_drvdata(pdev, &pm8921_drvdata); - chip = devm_kzalloc(&pdev->dev, sizeof(*chip) + sizeof(chip->config[0]) * nirqs, GFP_KERNEL); if (!chip) return -ENOMEM; - pmic->irq_chip = chip; - chip->dev = &pdev->dev; + platform_set_drvdata(pdev, chip); chip->regmap = regmap; chip->num_irqs = nirqs; chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8); @@ -481,8 +363,7 @@ static int pm8921_remove_child(struct device *dev, void *unused) static int pm8921_remove(struct platform_device *pdev) { int irq = platform_get_irq(pdev, 0); - struct pm8921 *pmic = pm8921_drvdata.pm_chip_data; - struct pm_irq_chip *chip = pmic->irq_chip; + struct pm_irq_chip *chip = platform_get_drvdata(pdev); device_for_each_child(&pdev->dev, NULL, pm8921_remove_child); irq_set_chained_handler(irq, NULL); diff --git 
a/include/linux/mfd/pm8xxx/core.h b/include/linux/mfd/pm8xxx/core.h deleted file mode 100644 index bd2f4f64e931..000000000000 --- a/include/linux/mfd/pm8xxx/core.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -/* - * Qualcomm PMIC 8xxx driver header file - * - */ - -#ifndef __MFD_PM8XXX_CORE_H -#define __MFD_PM8XXX_CORE_H - -#include - -struct pm8xxx_drvdata { - int (*pmic_readb) (const struct device *dev, u16 addr, u8 *val); - int (*pmic_writeb) (const struct device *dev, u16 addr, u8 val); - int (*pmic_read_buf) (const struct device *dev, u16 addr, u8 *buf, - int n); - int (*pmic_write_buf) (const struct device *dev, u16 addr, u8 *buf, - int n); - int (*pmic_read_irq_stat) (const struct device *dev, int irq); - void *pm_chip_data; -}; - -static inline int pm8xxx_readb(const struct device *dev, u16 addr, u8 *val) -{ - struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); - - if (!dd) - return -EINVAL; - return dd->pmic_readb(dev, addr, val); -} - -static inline int pm8xxx_writeb(const struct device *dev, u16 addr, u8 val) -{ - struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); - - if (!dd) - return -EINVAL; - return dd->pmic_writeb(dev, addr, val); -} - -static inline int pm8xxx_read_buf(const struct device *dev, u16 addr, u8 *buf, - int n) -{ - struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); - - if (!dd) - return -EINVAL; - return dd->pmic_read_buf(dev, addr, buf, n); -} - -static inline int pm8xxx_write_buf(const struct device *dev, u16 addr, u8 *buf, - int n) -{ - struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); - - if (!dd) - return -EINVAL; - return dd->pmic_write_buf(dev, addr, buf, n); -} - -static inline int pm8xxx_read_irq_stat(const struct device *dev, int irq) -{ - struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); - - if (!dd) - return -EINVAL; - return dd->pmic_read_irq_stat(dev, irq); -} - -#endif -- cgit v1.2.3 From cfb61a419630a810033f2777aba724ab6b1272b3 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Thu, 1 May 2014 14:29:27 +0200 Subject: mfd: AXP20x: Add mfd driver for AXP20x PMIC This patch introduces the preliminary support for PMICs X-Powers AXP202 and AXP209. The AXP209 and AXP202 are the PMUs (Power Management Unit) used by A10, A13 and A20 SoCs and developed by X-Powers, a sister company of Allwinner. 
The core enables support for two subsystems: - PEK (Power Enable Key) - Regulators Signed-off-by: Carlo Caione Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 12 +++ drivers/mfd/Makefile | 1 + drivers/mfd/axp20x.c | 258 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/axp20x.h | 180 +++++++++++++++++++++++++++++++ 4 files changed, 451 insertions(+) create mode 100644 drivers/mfd/axp20x.c create mode 100644 include/linux/mfd/axp20x.h (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index e166d7176d7a..c681741ce492 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -67,6 +67,18 @@ config MFD_BCM590XX help Support for the BCM590xx PMUs from Broadcom +config MFD_AXP20X + bool "X-Powers AXP20X" + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + depends on I2C=y + help + If you say Y here you get support for the X-Powers AXP202 and AXP209. + This driver include only the core APIs. You have to select individual + components like regulators or the PEK (Power Enable Key) under the + corresponding menus. + config MFD_CROS_EC tristate "ChromeOS Embedded Controller" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 2851275e2656..1efecf2793ae 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-irq.o obj-$(CONFIG_PMIC_DA9052) += da9052-core.o obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o +obj-$(CONFIG_MFD_AXP20X) += axp20x.o obj-$(CONFIG_MFD_LP3943) += lp3943.o obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c new file mode 100644 index 000000000000..dee653989e3a --- /dev/null +++ b/drivers/mfd/axp20x.c @@ -0,0 +1,258 @@ +/* + * axp20x.c - MFD core driver for the X-Powers AXP202 and AXP209 + * + * AXP20x comprises an adaptive USB-Compatible PWM charger, 2 BUCK DC-DC + * converters, 5 LDOs, multiple 12-bit ADCs of voltage, current and temperature + * as well as 4 configurable GPIOs. + * + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AXP20X_OFF 0x80 + +static const struct regmap_range axp20x_writeable_ranges[] = { + regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE), + regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES), +}; + +static const struct regmap_range axp20x_volatile_ranges[] = { + regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE), +}; + +static const struct regmap_access_table axp20x_writeable_table = { + .yes_ranges = axp20x_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(axp20x_writeable_ranges), +}; + +static const struct regmap_access_table axp20x_volatile_table = { + .yes_ranges = axp20x_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(axp20x_volatile_ranges), +}; + +static struct resource axp20x_pek_resources[] = { + { + .name = "PEK_DBR", + .start = AXP20X_IRQ_PEK_RIS_EDGE, + .end = AXP20X_IRQ_PEK_RIS_EDGE, + .flags = IORESOURCE_IRQ, + }, { + .name = "PEK_DBF", + .start = AXP20X_IRQ_PEK_FAL_EDGE, + .end = AXP20X_IRQ_PEK_FAL_EDGE, + .flags = IORESOURCE_IRQ, + }, +}; + +static const struct regmap_config axp20x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .wr_table = &axp20x_writeable_table, + .volatile_table = &axp20x_volatile_table, + .max_register = AXP20X_FG_RES, + .cache_type = REGCACHE_RBTREE, +}; + +#define AXP20X_IRQ(_irq, _off, _mask) \ + [AXP20X_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) } + +static const struct regmap_irq axp20x_regmap_irqs[] = { + AXP20X_IRQ(ACIN_OVER_V, 0, 7), + AXP20X_IRQ(ACIN_PLUGIN, 0, 6), + AXP20X_IRQ(ACIN_REMOVAL, 0, 5), + AXP20X_IRQ(VBUS_OVER_V, 0, 4), + AXP20X_IRQ(VBUS_PLUGIN, 0, 3), + AXP20X_IRQ(VBUS_REMOVAL, 0, 2), + AXP20X_IRQ(VBUS_V_LOW, 0, 1), + AXP20X_IRQ(BATT_PLUGIN, 1, 7), + AXP20X_IRQ(BATT_REMOVAL, 1, 6), + AXP20X_IRQ(BATT_ENT_ACT_MODE, 1, 5), + AXP20X_IRQ(BATT_EXIT_ACT_MODE, 1, 4), + AXP20X_IRQ(CHARG, 1, 3), + AXP20X_IRQ(CHARG_DONE, 1, 2), + AXP20X_IRQ(BATT_TEMP_HIGH, 1, 1), + AXP20X_IRQ(BATT_TEMP_LOW, 1, 0), + AXP20X_IRQ(DIE_TEMP_HIGH, 2, 7), + AXP20X_IRQ(CHARG_I_LOW, 2, 6), + AXP20X_IRQ(DCDC1_V_LONG, 2, 5), + AXP20X_IRQ(DCDC2_V_LONG, 2, 4), + AXP20X_IRQ(DCDC3_V_LONG, 2, 3), + AXP20X_IRQ(PEK_SHORT, 2, 1), + AXP20X_IRQ(PEK_LONG, 2, 0), + AXP20X_IRQ(N_OE_PWR_ON, 3, 7), + AXP20X_IRQ(N_OE_PWR_OFF, 3, 6), + AXP20X_IRQ(VBUS_VALID, 3, 5), + AXP20X_IRQ(VBUS_NOT_VALID, 3, 4), + AXP20X_IRQ(VBUS_SESS_VALID, 3, 3), + AXP20X_IRQ(VBUS_SESS_END, 3, 2), + AXP20X_IRQ(LOW_PWR_LVL1, 3, 1), + AXP20X_IRQ(LOW_PWR_LVL2, 3, 0), + AXP20X_IRQ(TIMER, 4, 7), + AXP20X_IRQ(PEK_RIS_EDGE, 4, 6), + AXP20X_IRQ(PEK_FAL_EDGE, 4, 5), + AXP20X_IRQ(GPIO3_INPUT, 4, 3), + AXP20X_IRQ(GPIO2_INPUT, 4, 2), + AXP20X_IRQ(GPIO1_INPUT, 4, 1), + AXP20X_IRQ(GPIO0_INPUT, 4, 0), +}; + +static const struct of_device_id axp20x_of_match[] = { + { .compatible = "x-powers,axp202", .data = (void *) AXP202_ID }, + { .compatible = "x-powers,axp209", .data = (void *) AXP209_ID }, + { }, +}; +MODULE_DEVICE_TABLE(of, axp20x_of_match); + +/* + * This is useless for OF-enabled devices, but it is needed by I2C subsystem + */ +static const struct i2c_device_id axp20x_i2c_id[] = { + { }, +}; +MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id); + +static const struct regmap_irq_chip axp20x_regmap_irq_chip = { + .name = "axp20x_irq_chip", + .status_base = AXP20X_IRQ1_STATE, + .ack_base = AXP20X_IRQ1_STATE, + .mask_base = AXP20X_IRQ1_EN, + .num_regs = 5, + .irqs = axp20x_regmap_irqs, + .num_irqs = ARRAY_SIZE(axp20x_regmap_irqs), + .mask_invert = true, + .init_ack_masked 
= true, +}; + +static const char * const axp20x_supplies[] = { + "acin", + "vin2", + "vin3", + "ldo24in", + "ldo3in", + "ldo5in", +}; + +static struct mfd_cell axp20x_cells[] = { + { + .name = "axp20x-pek", + .num_resources = ARRAY_SIZE(axp20x_pek_resources), + .resources = axp20x_pek_resources, + }, { + .name = "axp20x-regulator", + .parent_supplies = axp20x_supplies, + .num_parent_supplies = ARRAY_SIZE(axp20x_supplies), + }, +}; + +static struct axp20x_dev *axp20x_pm_power_off; +static void axp20x_power_off(void) +{ + regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL, + AXP20X_OFF); +} + +static int axp20x_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct axp20x_dev *axp20x; + const struct of_device_id *of_id; + int ret; + + axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL); + if (!axp20x) + return -ENOMEM; + + of_id = of_match_device(axp20x_of_match, &i2c->dev); + if (!of_id) { + dev_err(&i2c->dev, "Unable to setup AXP20X data\n"); + return -ENODEV; + } + axp20x->variant = (long) of_id->data; + + axp20x->i2c_client = i2c; + axp20x->dev = &i2c->dev; + dev_set_drvdata(axp20x->dev, axp20x); + + axp20x->regmap = devm_regmap_init_i2c(i2c, &axp20x_regmap_config); + if (IS_ERR(axp20x->regmap)) { + ret = PTR_ERR(axp20x->regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + + ret = regmap_add_irq_chip(axp20x->regmap, i2c->irq, + IRQF_ONESHOT | IRQF_SHARED, -1, + &axp20x_regmap_irq_chip, + &axp20x->regmap_irqc); + if (ret) { + dev_err(&i2c->dev, "failed to add irq chip: %d\n", ret); + return ret; + } + + ret = mfd_add_devices(axp20x->dev, -1, axp20x_cells, + ARRAY_SIZE(axp20x_cells), NULL, 0, NULL); + + if (ret) { + dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret); + regmap_del_irq_chip(i2c->irq, axp20x->regmap_irqc); + return ret; + } + + if (!pm_power_off) { + axp20x_pm_power_off = axp20x; + pm_power_off = axp20x_power_off; + } + + dev_info(&i2c->dev, "AXP20X driver loaded\n"); + + return 0; +} + +static int axp20x_i2c_remove(struct i2c_client *i2c) +{ + struct axp20x_dev *axp20x = i2c_get_clientdata(i2c); + + if (axp20x == axp20x_pm_power_off) { + axp20x_pm_power_off = NULL; + pm_power_off = NULL; + } + + mfd_remove_devices(axp20x->dev); + regmap_del_irq_chip(axp20x->i2c_client->irq, axp20x->regmap_irqc); + + return 0; +} + +static struct i2c_driver axp20x_i2c_driver = { + .driver = { + .name = "axp20x", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(axp20x_of_match), + }, + .probe = axp20x_i2c_probe, + .remove = axp20x_i2c_remove, + .id_table = axp20x_i2c_id, +}; + +module_i2c_driver(axp20x_i2c_driver); + +MODULE_DESCRIPTION("PMIC MFD core driver for AXP20X"); +MODULE_AUTHOR("Carlo Caione "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h new file mode 100644 index 000000000000..d0e31a2287ac --- /dev/null +++ b/include/linux/mfd/axp20x.h @@ -0,0 +1,180 @@ +/* + * Functions and registers to access AXP20X power management chip. + * + * Copyright (C) 2013, Carlo Caione + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_AXP20X_H +#define __LINUX_MFD_AXP20X_H + +enum { + AXP202_ID = 0, + AXP209_ID, +}; + +#define AXP20X_DATACACHE(m) (0x04 + (m)) + +/* Power supply */ +#define AXP20X_PWR_INPUT_STATUS 0x00 +#define AXP20X_PWR_OP_MODE 0x01 +#define AXP20X_USB_OTG_STATUS 0x02 +#define AXP20X_PWR_OUT_CTRL 0x12 +#define AXP20X_DCDC2_V_OUT 0x23 +#define AXP20X_DCDC2_LDO3_V_SCAL 0x25 +#define AXP20X_DCDC3_V_OUT 0x27 +#define AXP20X_LDO24_V_OUT 0x28 +#define AXP20X_LDO3_V_OUT 0x29 +#define AXP20X_VBUS_IPSOUT_MGMT 0x30 +#define AXP20X_V_OFF 0x31 +#define AXP20X_OFF_CTRL 0x32 +#define AXP20X_CHRG_CTRL1 0x33 +#define AXP20X_CHRG_CTRL2 0x34 +#define AXP20X_CHRG_BAK_CTRL 0x35 +#define AXP20X_PEK_KEY 0x36 +#define AXP20X_DCDC_FREQ 0x37 +#define AXP20X_V_LTF_CHRG 0x38 +#define AXP20X_V_HTF_CHRG 0x39 +#define AXP20X_APS_WARN_L1 0x3a +#define AXP20X_APS_WARN_L2 0x3b +#define AXP20X_V_LTF_DISCHRG 0x3c +#define AXP20X_V_HTF_DISCHRG 0x3d + +/* Interrupt */ +#define AXP20X_IRQ1_EN 0x40 +#define AXP20X_IRQ2_EN 0x41 +#define AXP20X_IRQ3_EN 0x42 +#define AXP20X_IRQ4_EN 0x43 +#define AXP20X_IRQ5_EN 0x44 +#define AXP20X_IRQ1_STATE 0x48 +#define AXP20X_IRQ2_STATE 0x49 +#define AXP20X_IRQ3_STATE 0x4a +#define AXP20X_IRQ4_STATE 0x4b +#define AXP20X_IRQ5_STATE 0x4c + +/* ADC */ +#define AXP20X_ACIN_V_ADC_H 0x56 +#define AXP20X_ACIN_V_ADC_L 0x57 +#define AXP20X_ACIN_I_ADC_H 0x58 +#define AXP20X_ACIN_I_ADC_L 0x59 +#define AXP20X_VBUS_V_ADC_H 0x5a +#define AXP20X_VBUS_V_ADC_L 0x5b +#define AXP20X_VBUS_I_ADC_H 0x5c +#define AXP20X_VBUS_I_ADC_L 0x5d +#define AXP20X_TEMP_ADC_H 0x5e +#define AXP20X_TEMP_ADC_L 0x5f +#define AXP20X_TS_IN_H 0x62 +#define AXP20X_TS_IN_L 0x63 +#define AXP20X_GPIO0_V_ADC_H 0x64 +#define AXP20X_GPIO0_V_ADC_L 0x65 +#define AXP20X_GPIO1_V_ADC_H 0x66 +#define AXP20X_GPIO1_V_ADC_L 0x67 +#define AXP20X_PWR_BATT_H 0x70 +#define AXP20X_PWR_BATT_M 0x71 +#define AXP20X_PWR_BATT_L 0x72 +#define AXP20X_BATT_V_H 0x78 +#define AXP20X_BATT_V_L 0x79 +#define AXP20X_BATT_CHRG_I_H 0x7a +#define AXP20X_BATT_CHRG_I_L 0x7b +#define AXP20X_BATT_DISCHRG_I_H 0x7c +#define AXP20X_BATT_DISCHRG_I_L 0x7d +#define AXP20X_IPSOUT_V_HIGH_H 0x7e +#define AXP20X_IPSOUT_V_HIGH_L 0x7f + +/* Power supply */ +#define AXP20X_DCDC_MODE 0x80 +#define AXP20X_ADC_EN1 0x82 +#define AXP20X_ADC_EN2 0x83 +#define AXP20X_ADC_RATE 0x84 +#define AXP20X_GPIO10_IN_RANGE 0x85 +#define AXP20X_GPIO1_ADC_IRQ_RIS 0x86 +#define AXP20X_GPIO1_ADC_IRQ_FAL 0x87 +#define AXP20X_TIMER_CTRL 0x8a +#define AXP20X_VBUS_MON 0x8b +#define AXP20X_OVER_TMP 0x8f + +/* GPIO */ +#define AXP20X_GPIO0_CTRL 0x90 +#define AXP20X_LDO5_V_OUT 0x91 +#define AXP20X_GPIO1_CTRL 0x92 +#define AXP20X_GPIO2_CTRL 0x93 +#define AXP20X_GPIO20_SS 0x94 +#define AXP20X_GPIO3_CTRL 0x95 + +/* Battery */ +#define AXP20X_CHRG_CC_31_24 0xb0 +#define AXP20X_CHRG_CC_23_16 0xb1 +#define AXP20X_CHRG_CC_15_8 0xb2 +#define AXP20X_CHRG_CC_7_0 0xb3 +#define AXP20X_DISCHRG_CC_31_24 0xb4 +#define AXP20X_DISCHRG_CC_23_16 0xb5 +#define AXP20X_DISCHRG_CC_15_8 0xb6 +#define AXP20X_DISCHRG_CC_7_0 0xb7 +#define AXP20X_CC_CTRL 0xb8 +#define AXP20X_FG_RES 0xb9 + +/* Regulators IDs */ +enum { + AXP20X_LDO1 = 0, + AXP20X_LDO2, + AXP20X_LDO3, + AXP20X_LDO4, + AXP20X_LDO5, + AXP20X_DCDC2, + AXP20X_DCDC3, + AXP20X_REG_ID_MAX, +}; + +/* IRQs */ +enum { + AXP20X_IRQ_ACIN_OVER_V = 1, + AXP20X_IRQ_ACIN_PLUGIN, + AXP20X_IRQ_ACIN_REMOVAL, + AXP20X_IRQ_VBUS_OVER_V, + AXP20X_IRQ_VBUS_PLUGIN, + AXP20X_IRQ_VBUS_REMOVAL, + AXP20X_IRQ_VBUS_V_LOW, + AXP20X_IRQ_BATT_PLUGIN, + AXP20X_IRQ_BATT_REMOVAL, + 
AXP20X_IRQ_BATT_ENT_ACT_MODE, + AXP20X_IRQ_BATT_EXIT_ACT_MODE, + AXP20X_IRQ_CHARG, + AXP20X_IRQ_CHARG_DONE, + AXP20X_IRQ_BATT_TEMP_HIGH, + AXP20X_IRQ_BATT_TEMP_LOW, + AXP20X_IRQ_DIE_TEMP_HIGH, + AXP20X_IRQ_CHARG_I_LOW, + AXP20X_IRQ_DCDC1_V_LONG, + AXP20X_IRQ_DCDC2_V_LONG, + AXP20X_IRQ_DCDC3_V_LONG, + AXP20X_IRQ_PEK_SHORT = 22, + AXP20X_IRQ_PEK_LONG, + AXP20X_IRQ_N_OE_PWR_ON, + AXP20X_IRQ_N_OE_PWR_OFF, + AXP20X_IRQ_VBUS_VALID, + AXP20X_IRQ_VBUS_NOT_VALID, + AXP20X_IRQ_VBUS_SESS_VALID, + AXP20X_IRQ_VBUS_SESS_END, + AXP20X_IRQ_LOW_PWR_LVL1, + AXP20X_IRQ_LOW_PWR_LVL2, + AXP20X_IRQ_TIMER, + AXP20X_IRQ_PEK_RIS_EDGE, + AXP20X_IRQ_PEK_FAL_EDGE, + AXP20X_IRQ_GPIO3_INPUT, + AXP20X_IRQ_GPIO2_INPUT, + AXP20X_IRQ_GPIO1_INPUT, + AXP20X_IRQ_GPIO0_INPUT, +}; + +struct axp20x_dev { + struct device *dev; + struct i2c_client *i2c_client; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irqc; + long variant; +}; + +#endif /* __LINUX_MFD_AXP20X_H */ -- cgit v1.2.3 From 3d2379909374ef2de6bc57ed8966c7ca8c9dfb82 Mon Sep 17 00:00:00 2001 From: Tushar Behera Date: Fri, 9 May 2014 16:37:40 +0530 Subject: mfd: syscon: Include linux/err.h to fix build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit df73de9b0d412 ("mfd: syscon: Return -ENOSYS if CONFIG_MFD_SYSCON is not enabled") introduced fallbacks for APIs, but missed out on adding the header file. This would work only if linux/err.h is also included in the source code from where this file is included. It would be better to include linux/err.h in file to remove possible build errors. Without this patch, we get following and similar build errors if this header file is included in some source file and CONFIG_MFD_SYSCON is not enabled. include/linux/mfd/syscon.h: In function ‘syscon_node_to_regmap’: include/linux/mfd/syscon.h:30:2: error: implicit declaration of function ‘ERR_PTR’ [-Werror=implicit-function-declaration] return ERR_PTR(-ENOSYS); ^ include/linux/mfd/syscon.h:30:18: error: ‘ENOSYS’ undeclared (first use in this function) return ERR_PTR(-ENOSYS); ^ Signed-off-by: Tushar Behera Signed-off-by: Lee Jones --- include/linux/mfd/syscon.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index 8789fa3c7fd9..75e543b78f53 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -15,6 +15,8 @@ #ifndef __LINUX_MFD_SYSCON_H__ #define __LINUX_MFD_SYSCON_H__ +#include + struct device_node; #ifdef CONFIG_MFD_SYSCON -- cgit v1.2.3 From dcc21cc09e3c22d0ede4e105afa8884eba293b58 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 3 Apr 2014 17:45:15 +0200 Subject: mfd: Add driver for Atmel Microcontroller on iPaq h3xxx This adds a driver for the Atmel Microcontroller found on the iPAQ h3xxx series. This device handles some keys, the touchscreen, and the battery monitoring. This is a port of a driver from handhelds.org 2.6.21 kernel, written by Alessandro Gardich based on Andrew Christians original HAL-driver. It has been heavily cleaned and converted to mfd-core by Dmitry Artamonow and rewritten again for the v3.x series kernels by Linus Walleij, bringing back some of the functionality lost from Andrew's original driver. 
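The wire protocol implemented below frames each message as a start-of-frame byte, a combined id/length byte, the payload, and an 8-bit checksum covering the id/length byte and the payload. A stand-alone sketch of the TX framing, reconstructed from the patch (the example_ names are not part of the driver):

#include <linux/types.h>

#define EXAMPLE_CHAR_SOF	0x02	/* same SOF value as CHAR_SOF below */

/* Build a frame: SOF, (id << 4 | len), payload, 8-bit checksum. */
static int example_build_frame(u8 id, const u8 *data, u8 len, u8 *buf)
{
	u8 checksum = ((id & 0x0f) << 4) | (len & 0x0f);
	int bp = 0, i;

	buf[bp++] = EXAMPLE_CHAR_SOF;
	buf[bp++] = checksum;		/* id/length byte */
	for (i = 0; i < len; i++) {
		buf[bp++] = data[i];
		checksum += data[i];	/* checksum accumulates the payload */
	}
	buf[bp++] = checksum;

	return bp;			/* number of bytes to transmit */
}

The receive side in micro_process_char() walks the mirror-image state machine (STATE_SOF, STATE_ID, STATE_DATA, STATE_CHKSUM) and only dispatches a message once the received checksum matches.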
Acked-by: Greg Kroah-Hartman Signed-off-by: Alessandro Gardich Signed-off-by: Dmitry Artamonow Signed-off-by: Linus Walleij Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 10 + drivers/mfd/Makefile | 1 + drivers/mfd/ipaq-micro.c | 482 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/ipaq-micro.h | 148 +++++++++++++ 4 files changed, 641 insertions(+) create mode 100644 drivers/mfd/ipaq-micro.c create mode 100644 include/linux/mfd/ipaq-micro.h (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index e56fb3749bca..60cef41b0af4 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -262,6 +262,16 @@ config MFD_INTEL_MSIC Passage) chip. This chip embeds audio, battery, GPIO, etc. devices used in Intel Medfield platforms. +config MFD_IPAQ_MICRO + bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support" + depends on SA1100_H3100 || SA1100_H3600 + select MFD_CORE + help + Select this to get support for the Microcontroller found in + the Compaq iPAQ handheld computers. This is an Atmel + AT90LS8535 microcontroller flashed with a special iPAQ + firmware using the custom protocol implemented in this driver. + config MFD_JANZ_CMODIO tristate "Janz CMOD-IO PCI MODULbus Carrier Board" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index df7823cae5af..5dec445ab139 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -168,3 +168,4 @@ obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o obj-$(CONFIG_MFD_STW481X) += stw481x.o +obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c new file mode 100644 index 000000000000..1763d6db346e --- /dev/null +++ b/drivers/mfd/ipaq-micro.c @@ -0,0 +1,482 @@ +/* + * Compaq iPAQ h3xxx Atmel microcontroller companion support + * + * This is an Atmel AT90LS8535 with a special flashed-in firmware that + * implements the special protocol used by this driver. + * + * based on previous kernel 2.4 version by Andrew Christian + * Author : Alessandro Gardich + * Author : Dmitry Artamonow + * Author : Linus Walleij + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void ipaq_micro_trigger_tx(struct ipaq_micro *micro) +{ + struct ipaq_micro_txdev *tx = µ->tx; + struct ipaq_micro_msg *msg = micro->msg; + int i, bp; + u8 checksum; + u32 val; + + bp = 0; + tx->buf[bp++] = CHAR_SOF; + + checksum = ((msg->id & 0x0f) << 4) | (msg->tx_len & 0x0f); + tx->buf[bp++] = checksum; + + for (i = 0; i < msg->tx_len; i++) { + tx->buf[bp++] = msg->tx_data[i]; + checksum += msg->tx_data[i]; + } + + tx->buf[bp++] = checksum; + tx->len = bp; + tx->index = 0; + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, + tx->buf, tx->len, true); + + /* Enable interrupt */ + val = readl(micro->base + UTCR3); + val |= UTCR3_TIE; + writel(val, micro->base + UTCR3); +} + +int ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg) +{ + unsigned long flags; + + dev_dbg(micro->dev, "TX msg: %02x, %d bytes\n", msg->id, msg->tx_len); + + spin_lock_irqsave(µ->lock, flags); + if (micro->msg) { + list_add_tail(&msg->node, µ->queue); + spin_unlock_irqrestore(µ->lock, flags); + return 0; + } + micro->msg = msg; + ipaq_micro_trigger_tx(micro); + spin_unlock_irqrestore(µ->lock, flags); + return 0; +} +EXPORT_SYMBOL(ipaq_micro_tx_msg); + +static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data) +{ + int i; + + dev_dbg(micro->dev, "RX msg: %02x, %d bytes\n", id, len); + + spin_lock(µ->lock); + switch (id) { + case MSG_VERSION: + case MSG_EEPROM_READ: + case MSG_EEPROM_WRITE: + case MSG_BACKLIGHT: + case MSG_NOTIFY_LED: + case MSG_THERMAL_SENSOR: + case MSG_BATTERY: + /* Handle synchronous messages */ + if (micro->msg && micro->msg->id == id) { + struct ipaq_micro_msg *msg = micro->msg; + + memcpy(msg->rx_data, data, len); + msg->rx_len = len; + complete(µ->msg->ack); + if (!list_empty(µ->queue)) { + micro->msg = list_entry(micro->queue.next, + struct ipaq_micro_msg, + node); + list_del_init(µ->msg->node); + ipaq_micro_trigger_tx(micro); + } else + micro->msg = NULL; + dev_dbg(micro->dev, "OK RX message 0x%02x\n", id); + } else { + dev_err(micro->dev, + "out of band RX message 0x%02x\n", id); + if(!micro->msg) + dev_info(micro->dev, "no message queued\n"); + else + dev_info(micro->dev, "expected message %02x\n", + micro->msg->id); + } + break; + case MSG_KEYBOARD: + if (micro->key) + micro->key(micro->key_data, len, data); + else + dev_dbg(micro->dev, "key message ignored, no handle \n"); + break; + case MSG_TOUCHSCREEN: + if (micro->ts) + micro->ts(micro->ts_data, len, data); + else + dev_dbg(micro->dev, "touchscreen message ignored, no handle \n"); + break; + default: + dev_err(micro->dev, + "unknown msg %d [%d] ", id, len); + for (i = 0; i < len; ++i) + pr_cont("0x%02x ", data[i]); + pr_cont("\n"); + } + spin_unlock(µ->lock); +} + +static void micro_process_char(struct ipaq_micro *micro, u8 ch) +{ + struct ipaq_micro_rxdev *rx = µ->rx; + + switch (rx->state) { + case STATE_SOF: /* Looking for SOF */ + if (ch == CHAR_SOF) + rx->state = STATE_ID; /* Next byte is the id and len */ + break; + case STATE_ID: /* Looking for id and len byte */ + rx->id = (ch & 0xf0) >> 4 ; + rx->len = (ch & 0x0f); + rx->index = 0; + rx->chksum = ch; + rx->state = (rx->len > 0) ? 
STATE_DATA : STATE_CHKSUM; + break; + case STATE_DATA: /* Looking for 'len' data bytes */ + rx->chksum += ch; + rx->buf[rx->index] = ch; + if (++rx->index == rx->len) + rx->state = STATE_CHKSUM; + break; + case STATE_CHKSUM: /* Looking for the checksum */ + if (ch == rx->chksum) + micro_rx_msg(micro, rx->id, rx->len, rx->buf); + rx->state = STATE_SOF; + break; + } +} + +static void micro_rx_chars(struct ipaq_micro *micro) +{ + u32 status, ch; + + while ((status = readl(micro->base + UTSR1)) & UTSR1_RNE) { + ch = readl(micro->base + UTDR); + if (status & UTSR1_PRE) + dev_err(micro->dev, "rx: parity error\n"); + else if (status & UTSR1_FRE) + dev_err(micro->dev, "rx: framing error\n"); + else if (status & UTSR1_ROR) + dev_err(micro->dev, "rx: overrun error\n"); + micro_process_char(micro, ch); + } +} + +static void ipaq_micro_get_version(struct ipaq_micro *micro) +{ + struct ipaq_micro_msg msg = { + .id = MSG_VERSION, + }; + + ipaq_micro_tx_msg_sync(micro, &msg); + if (msg.rx_len == 4) { + memcpy(micro->version, msg.rx_data, 4); + micro->version[4] = '\0'; + } else if (msg.rx_len == 9) { + memcpy(micro->version, msg.rx_data, 4); + micro->version[4] = '\0'; + /* Bytes 4-7 are "pack", byte 8 is "boot type" */ + } else { + dev_err(micro->dev, + "illegal version message %d bytes\n", msg.rx_len); + } +} + +static void ipaq_micro_eeprom_read(struct ipaq_micro *micro, + u8 address, u8 len, u8 *data) +{ + struct ipaq_micro_msg msg = { + .id = MSG_EEPROM_READ, + }; + u8 i; + + for (i = 0; i < len; i++) { + msg.tx_data[0] = address + i; + msg.tx_data[1] = 1; + msg.tx_len = 2; + ipaq_micro_tx_msg_sync(micro, &msg); + memcpy(data + (i * 2), msg.rx_data, 2); + } +} + +static char *ipaq_micro_str(u8 *wchar, u8 len) +{ + char retstr[256]; + u8 i; + + for (i = 0; i < len / 2; i++) + retstr[i] = wchar[i * 2]; + return kstrdup(retstr, GFP_KERNEL); +} + +static u16 ipaq_micro_to_u16(u8 *data) +{ + return data[1] << 8 | data[0]; +} + +static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro) +{ + u8 dump[256]; + char *str; + + ipaq_micro_eeprom_read(micro, 0, 128, dump); + str = ipaq_micro_str(dump, 10); + if (str) { + dev_info(micro->dev, "HM version %s\n", str); + kfree(str); + } + str = ipaq_micro_str(dump+10, 40); + if (str) { + dev_info(micro->dev, "serial number: %s\n", str); + /* Feed the random pool with this */ + add_device_randomness(str, strlen(str)); + kfree(str); + } + str = ipaq_micro_str(dump+50, 20); + if (str) { + dev_info(micro->dev, "module ID: %s\n", str); + kfree(str); + } + str = ipaq_micro_str(dump+70, 10); + if (str) { + dev_info(micro->dev, "product revision: %s\n", str); + kfree(str); + } + dev_info(micro->dev, "product ID: %u\n", ipaq_micro_to_u16(dump+80)); + dev_info(micro->dev, "frame rate: %u fps\n", + ipaq_micro_to_u16(dump+82)); + dev_info(micro->dev, "page mode: %u\n", ipaq_micro_to_u16(dump+84)); + dev_info(micro->dev, "country ID: %u\n", ipaq_micro_to_u16(dump+86)); + dev_info(micro->dev, "color display: %s\n", + ipaq_micro_to_u16(dump+88) ? 
"yes" : "no"); + dev_info(micro->dev, "ROM size: %u MiB\n", ipaq_micro_to_u16(dump+90)); + dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92)); + dev_info(micro->dev, "screen: %u x %u\n", + ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96)); + print_hex_dump(KERN_DEBUG, "eeprom: ", DUMP_PREFIX_OFFSET, 16, 1, + dump, 256, true); + +} + +static void micro_tx_chars(struct ipaq_micro *micro) +{ + struct ipaq_micro_txdev *tx = µ->tx; + u32 val; + + while ((tx->index < tx->len) && + (readl(micro->base + UTSR1) & UTSR1_TNF)) { + writel(tx->buf[tx->index], micro->base + UTDR); + tx->index++; + } + + /* Stop interrupts */ + val = readl(micro->base + UTCR3); + val &= ~UTCR3_TIE; + writel(val, micro->base + UTCR3); +} + +static void micro_reset_comm(struct ipaq_micro *micro) +{ + struct ipaq_micro_rxdev *rx = µ->rx; + u32 val; + + if (micro->msg) + complete(µ->msg->ack); + + /* Initialize Serial channel protocol frame */ + rx->state = STATE_SOF; /* Reset the state machine */ + + /* Set up interrupts */ + writel(0x01, micro->sdlc + 0x0); /* Select UART mode */ + + /* Clean up CR3 */ + writel(0x0, micro->base + UTCR3); + + /* Format: 8N1 */ + writel(UTCR0_8BitData | UTCR0_1StpBit, micro->base + UTCR0); + + /* Baud rate: 115200 */ + writel(0x0, micro->base + UTCR1); + writel(0x1, micro->base + UTCR2); + + /* Clear SR0 */ + writel(0xff, micro->base + UTSR0); + + /* Enable RX int, disable TX int */ + writel(UTCR3_TXE | UTCR3_RXE | UTCR3_RIE, micro->base + UTCR3); + val = readl(micro->base + UTCR3); + val &= ~UTCR3_TIE; + writel(val, micro->base + UTCR3); +} + +static irqreturn_t micro_serial_isr(int irq, void *dev_id) +{ + struct ipaq_micro *micro = dev_id; + struct ipaq_micro_txdev *tx = µ->tx; + u32 status; + + status = readl(micro->base + UTSR0); + do { + if (status & (UTSR0_RID | UTSR0_RFS)) { + if (status & UTSR0_RID) + /* Clear the Receiver IDLE bit */ + writel(UTSR0_RID, micro->base + UTSR0); + micro_rx_chars(micro); + } + + /* Clear break bits */ + if (status & (UTSR0_RBB | UTSR0_REB)) + writel(status & (UTSR0_RBB | UTSR0_REB), + micro->base + UTSR0); + + if (status & UTSR0_TFS) + micro_tx_chars(micro); + + status = readl(micro->base + UTSR0); + + } while (((tx->index < tx->len) && (status & UTSR0_TFS)) || + (status & (UTSR0_RFS | UTSR0_RID))); + + return IRQ_HANDLED; +} + +static struct mfd_cell micro_cells[] = { + { .name = "ipaq-micro-backlight", }, + { .name = "ipaq-micro-battery", }, + { .name = "ipaq-micro-keys", }, + { .name = "ipaq-micro-ts", }, + { .name = "ipaq-micro-leds", }, +}; + +static int micro_resume(struct device *dev) +{ + struct ipaq_micro *micro = dev_get_drvdata(dev); + + micro_reset_comm(micro); + mdelay(10); + + return 0; +} + +static int micro_probe(struct platform_device *pdev) +{ + struct ipaq_micro *micro; + struct resource *res; + int ret; + int irq; + + micro = devm_kzalloc(&pdev->dev, sizeof(*micro), GFP_KERNEL); + if (!micro) + return -ENOMEM; + + micro->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + micro->base = devm_request_and_ioremap(&pdev->dev, res); + if (!micro->base) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -EINVAL; + + micro->sdlc = devm_request_and_ioremap(&pdev->dev, res); + if (!micro->sdlc) + return -ENOMEM; + + micro_reset_comm(micro); + + irq = platform_get_irq(pdev, 0); + if (!irq) + return -EINVAL; + ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr, + IRQF_SHARED, "ipaq-micro", + micro); + if (ret) { 
+ dev_err(&pdev->dev, "unable to grab serial port IRQ\n"); + return ret; + } else + dev_info(&pdev->dev, "grabbed serial port IRQ\n"); + + spin_lock_init(µ->lock); + INIT_LIST_HEAD(µ->queue); + platform_set_drvdata(pdev, micro); + + ret = mfd_add_devices(&pdev->dev, pdev->id, micro_cells, + ARRAY_SIZE(micro_cells), NULL, 0, NULL); + if (ret) { + dev_err(&pdev->dev, "error adding MFD cells"); + return ret; + } + + /* Check version */ + ipaq_micro_get_version(micro); + dev_info(&pdev->dev, "Atmel micro ASIC version %s\n", micro->version); + ipaq_micro_eeprom_dump(micro); + + return 0; +} + +static int micro_remove(struct platform_device *pdev) +{ + struct ipaq_micro *micro = platform_get_drvdata(pdev); + u32 val; + + mfd_remove_devices(&pdev->dev); + + val = readl(micro->base + UTCR3); + val &= ~(UTCR3_RXE | UTCR3_RIE); /* disable receive interrupt */ + val &= ~(UTCR3_TXE | UTCR3_TIE); /* disable transmit interrupt */ + writel(val, micro->base + UTCR3); + + return 0; +} + +static const struct dev_pm_ops micro_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, micro_resume) +}; + +static struct platform_driver micro_device_driver = { + .driver = { + .name = "ipaq-h3xxx-micro", + .pm = µ_dev_pm_ops, + }, + .probe = micro_probe, + .remove = micro_remove, + /* .shutdown = micro_suspend, // FIXME */ +}; +module_platform_driver(micro_device_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("driver for iPAQ Atmel micro core and backlight"); diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h new file mode 100644 index 000000000000..5c4d29f6674f --- /dev/null +++ b/include/linux/mfd/ipaq-micro.h @@ -0,0 +1,148 @@ +/* + * Header file for the compaq Micro MFD + */ + +#ifndef _MFD_IPAQ_MICRO_H_ +#define _MFD_IPAQ_MICRO_H_ + +#include +#include +#include + +#define TX_BUF_SIZE 32 +#define RX_BUF_SIZE 16 +#define CHAR_SOF 0x02 + +/* + * These are the different messages that can be sent to the microcontroller + * to control various aspects. 
+ */ +#define MSG_VERSION 0x0 +#define MSG_KEYBOARD 0x2 +#define MSG_TOUCHSCREEN 0x3 +#define MSG_EEPROM_READ 0x4 +#define MSG_EEPROM_WRITE 0x5 +#define MSG_THERMAL_SENSOR 0x6 +#define MSG_NOTIFY_LED 0x8 +#define MSG_BATTERY 0x9 +#define MSG_SPI_READ 0xb +#define MSG_SPI_WRITE 0xc +#define MSG_BACKLIGHT 0xd /* H3600 only */ +#define MSG_CODEC_CTRL 0xe /* H3100 only */ +#define MSG_DISPLAY_CTRL 0xf /* H3100 only */ + +/* state of receiver parser */ +enum rx_state { + STATE_SOF = 0, /* Next byte should be start of frame */ + STATE_ID, /* Next byte is ID & message length */ + STATE_DATA, /* Next byte is a data byte */ + STATE_CHKSUM /* Next byte should be checksum */ +}; + +/** + * struct ipaq_micro_txdev - TX state + * @len: length of message in TX buffer + * @index: current index into TX buffer + * @buf: TX buffer + */ +struct ipaq_micro_txdev { + u8 len; + u8 index; + u8 buf[TX_BUF_SIZE]; +}; + +/** + * struct ipaq_micro_rxdev - RX state + * @state: context of RX state machine + * @chksum: calculated checksum + * @id: message ID from packet + * @len: RX buffer length + * @index: RX buffer index + * @buf: RX buffer + */ +struct ipaq_micro_rxdev { + enum rx_state state; + unsigned char chksum; + u8 id; + unsigned int len; + unsigned int index; + u8 buf[RX_BUF_SIZE]; +}; + +/** + * struct ipaq_micro_msg - message to the iPAQ microcontroller + * @id: 4-bit ID of the message + * @tx_len: length of TX data + * @tx_data: TX data to send + * @rx_len: length of receieved RX data + * @rx_data: RX data to recieve + * @ack: a completion that will be completed when RX is complete + * @node: list node if message gets queued + */ +struct ipaq_micro_msg { + u8 id; + u8 tx_len; + u8 tx_data[TX_BUF_SIZE]; + u8 rx_len; + u8 rx_data[RX_BUF_SIZE]; + struct completion ack; + struct list_head node; +}; + +/** + * struct ipaq_micro - iPAQ microcontroller state + * @dev: corresponding platform device + * @base: virtual memory base for underlying serial device + * @sdlc: virtual memory base for Synchronous Data Link Controller + * @version: version string + * @tx: TX state + * @rx: RX state + * @lock: lock for this state container + * @msg: current message + * @queue: message queue + * @key: callback for asynchronous key events + * @key_data: data to pass along with key events + * @ts: callback for asynchronous touchscreen events + * @ts_data: data to pass along with key events + */ +struct ipaq_micro { + struct device *dev; + void __iomem *base; + void __iomem *sdlc; + char version[5]; + struct ipaq_micro_txdev tx; /* transmit ISR state */ + struct ipaq_micro_rxdev rx; /* receive ISR state */ + spinlock_t lock; + struct ipaq_micro_msg *msg; + struct list_head queue; + void (*key) (void *data, int len, unsigned char *rxdata); + void *key_data; + void (*ts) (void *data, int len, unsigned char *rxdata); + void *ts_data; +}; + +extern int +ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg); + +static inline int +ipaq_micro_tx_msg_sync(struct ipaq_micro *micro, + struct ipaq_micro_msg *msg) +{ + int ret; + + init_completion(&msg->ack); + ret = ipaq_micro_tx_msg(micro, msg); + wait_for_completion(&msg->ack); + + return ret; +} + +static inline int +ipaq_micro_tx_msg_async(struct ipaq_micro *micro, + struct ipaq_micro_msg *msg) +{ + init_completion(&msg->ack); + return ipaq_micro_tx_msg(micro, msg); +} + +#endif /* _MFD_IPAQ_MICRO_H_ */ -- cgit v1.2.3 From 5271db29d7199fe0ffb303ca4bbbb1485bba28c3 Mon Sep 17 00:00:00 2001 From: Bill Richardson Date: Wed, 30 Apr 2014 10:44:08 -0700 Subject: mfd: 
cros_ec: Sync to the latest cros_ec_commands.h from EC sources This just updates include/linux/mfd/cros_ec_commands.h to match the latest EC version (which is the One True Source for such things). See [dianders: took today's ToT version from the Chromium OS EC; deleted references to cros_ec_dev and cros_ec_lpc since those aren't upstream yet] Signed-off-by: Bill Richardson Signed-off-by: Doug Anderson Reviewed-by: Simon Glass Tested-by: Andrew Bresticker Tested-by: Stephen Warren Signed-off-by: Lee Jones --- drivers/mfd/cros_ec.c | 2 +- include/linux/mfd/cros_ec.h | 4 +- include/linux/mfd/cros_ec_commands.h | 1128 +++++++++++++++++++++++++++++++--- 3 files changed, 1059 insertions(+), 75 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index fae69b1db5b1..8b6fb34c9b31 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -30,7 +30,7 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, uint8_t *out; int csum, i; - BUG_ON(msg->out_len > EC_HOST_PARAM_SIZE); + BUG_ON(msg->out_len > EC_PROTO2_MAX_PARAM_SIZE); out = ec_dev->dout; out[0] = EC_CMD_VERSION0 + msg->version; out[1] = msg->cmd; diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 032af7fc5b2e..887ef4f7bef7 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -29,8 +29,8 @@ enum { EC_MSG_RX_PROTO_BYTES = 3, /* Max length of messages */ - EC_MSG_BYTES = EC_HOST_PARAM_SIZE + EC_MSG_TX_PROTO_BYTES, - + EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + EC_MSG_TX_PROTO_BYTES, }; /** diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 86fd06953bcd..7853a6410d14 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -24,25 +24,12 @@ #define __CROS_EC_COMMANDS_H /* - * Protocol overview + * Current version of this protocol * - * request: CMD [ P0 P1 P2 ... Pn S ] - * response: ERR [ P0 P1 P2 ... Pn S ] - * - * where the bytes are defined as follow : - * - CMD is the command code. (defined by EC_CMD_ constants) - * - ERR is the error code. (defined by EC_RES_ constants) - * - Px is the optional payload. - * it is not sent if the error code is not success. - * (defined by ec_params_ and ec_response_ structures) - * - S is the checksum which is the sum of all payload bytes. - * - * On LPC, CMD and ERR are sent/received at EC_LPC_ADDR_KERNEL|USER_CMD - * and the payloads are sent/received at EC_LPC_ADDR_KERNEL|USER_PARAM. - * On I2C, all bytes are sent serially in the same message. + * TODO(crosbug.com/p/11223): This is effectively useless; protocol is + * determined in other ways. Remove this once the kernel code no longer + * depends on it. 
*/ - -/* Current version of this protocol */ #define EC_PROTO_VERSION 0x00000002 /* Command version mask */ @@ -57,13 +44,19 @@ #define EC_LPC_ADDR_HOST_CMD 0x204 /* I/O addresses for host command args and params */ -#define EC_LPC_ADDR_HOST_ARGS 0x800 -#define EC_LPC_ADDR_HOST_PARAM 0x804 -#define EC_HOST_PARAM_SIZE 0x0fc /* Size of param area in bytes */ - -/* I/O addresses for host command params, old interface */ -#define EC_LPC_ADDR_OLD_PARAM 0x880 -#define EC_OLD_PARAM_SIZE 0x080 /* Size of param area in bytes */ +/* Protocol version 2 */ +#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ +#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is + * EC_PROTO2_MAX_PARAM_SIZE */ +/* Protocol version 3 */ +#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ +#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ + +/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff + * and they tell the kernel that so we have to think of it as two parts. */ +#define EC_HOST_CMD_REGION0 0x800 +#define EC_HOST_CMD_REGION1 0x880 +#define EC_HOST_CMD_REGION_SIZE 0x80 /* EC command register bit functions */ #define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */ @@ -79,18 +72,22 @@ #define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */ /* The offset address of each type of data in mapped memory. */ -#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors */ -#define EC_MEMMAP_FAN 0x10 /* Fan speeds */ -#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* Temp sensors (second set) */ -#define EC_MEMMAP_ID 0x20 /* 'E' 'C' */ +#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */ +#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */ +#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */ +#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */ #define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */ #define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */ #define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */ #define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */ #define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */ -#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host command interface flags */ -#define EC_MEMMAP_SWITCHES 0x30 -#define EC_MEMMAP_HOST_EVENTS 0x34 +#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */ +/* Unused 0x28 - 0x2f */ +#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ +/* Unused 0x31 - 0x33 */ +#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */ +/* Reserve 0x38 - 0x3f for additional host event-related stuff */ +/* Battery values are all 32 bits */ #define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ #define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ #define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */ @@ -99,10 +96,24 @@ #define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ #define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ #define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */ +/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */ #define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */ #define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */ #define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */ #define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */ +#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */ +/* Unused 0x84 - 0x8f */ 
+#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ +/* Unused 0x91 */ +#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */ +#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ +/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */ + + +/* Define the format of the accelerometer mapped memory status byte. */ +#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f +#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4) +#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7) /* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ #define EC_TEMP_SENSOR_ENTRIES 16 @@ -112,6 +123,8 @@ * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2. */ #define EC_TEMP_SENSOR_B_ENTRIES 8 + +/* Special values for mapped temperature sensors */ #define EC_TEMP_SENSOR_NOT_PRESENT 0xff #define EC_TEMP_SENSOR_ERROR 0xfe #define EC_TEMP_SENSOR_NOT_POWERED 0xfd @@ -122,6 +135,18 @@ */ #define EC_TEMP_SENSOR_OFFSET 200 +/* + * Number of ALS readings at EC_MEMMAP_ALS + */ +#define EC_ALS_ENTRIES 2 + +/* + * The default value a temperature sensor will return when it is present but + * has not been read this boot. This is a reasonable number to avoid + * triggering alarms on the host. + */ +#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET) + #define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */ #define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */ #define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */ @@ -137,8 +162,8 @@ #define EC_SWITCH_LID_OPEN 0x01 #define EC_SWITCH_POWER_BUTTON_PRESSED 0x02 #define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04 -/* Recovery requested via keyboard */ -#define EC_SWITCH_KEYBOARD_RECOVERY 0x08 +/* Was recovery requested via keyboard; now unused. */ +#define EC_SWITCH_IGNORE1 0x08 /* Recovery requested via dedicated signal (from servo board) */ #define EC_SWITCH_DEDICATED_RECOVERY 0x10 /* Was fake developer mode switch; now unused. Remove in next refactor. */ @@ -147,10 +172,15 @@ /* Host command interface flags */ /* Host command interface supports LPC args (LPC interface only) */ #define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01 +/* Host command interface supports version 3 protocol */ +#define EC_HOST_CMD_FLAG_VERSION_3 0x02 /* Wireless switch flags */ -#define EC_WIRELESS_SWITCH_WLAN 0x01 -#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 +#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */ +#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */ +#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */ +#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ +#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ /* * This header file is used in coreboot both in C and ACPI code. The ACPI code @@ -159,6 +189,14 @@ */ #ifndef __ACPI__ +/* + * Define __packed if someone hasn't beat us to it. Linux kernel style + * checking prefers __packed over __attribute__((packed)). 
+ */ +#ifndef __packed +#define __packed __attribute__((packed)) +#endif + /* LPC command status byte masks */ /* EC has written a byte in the data register and host hasn't read it yet */ #define EC_LPC_STATUS_TO_HOST 0x01 @@ -198,6 +236,9 @@ enum ec_status { EC_RES_UNAVAILABLE = 9, /* No response available */ EC_RES_TIMEOUT = 10, /* We got a timeout */ EC_RES_OVERFLOW = 11, /* Table / data overflow */ + EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ + EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ + EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */ }; /* @@ -235,6 +276,16 @@ enum host_event_code { /* Shutdown due to battery level too low */ EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, + /* Suggest that the AP throttle itself */ + EC_HOST_EVENT_THROTTLE_START = 18, + /* Suggest that the AP resume normal speed */ + EC_HOST_EVENT_THROTTLE_STOP = 19, + + /* Hang detect logic detected a hang and host event timeout expired */ + EC_HOST_EVENT_HANG_DETECT = 20, + /* Hang detect logic detected a hang and warm rebooted the AP */ + EC_HOST_EVENT_HANG_REBOOT = 21, + /* * The high bit of the event mask is not used as a host event code. If * it reads back as set, then the entire event mask should be @@ -279,6 +330,188 @@ struct ec_lpc_host_args { */ #define EC_HOST_ARGS_FLAG_TO_HOST 0x02 +/*****************************************************************************/ +/* + * Byte codes returned by EC over SPI interface. + * + * These can be used by the AP to debug the EC interface, and to determine + * when the EC is not in a state where it will ever get around to responding + * to the AP. + * + * Example of sequence of bytes read from EC for a current good transfer: + * 1. - - AP asserts chip select (CS#) + * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request + * 3. - - EC starts handling CS# interrupt + * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request + * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in + * bytes looking for EC_SPI_FRAME_START + * 6. - - EC finishes processing and sets up response + * 7. EC_SPI_FRAME_START - AP reads frame byte + * 8. (response packet) - AP reads response packet + * 9. EC_SPI_PAST_END - Any additional bytes read by AP + * 10 - - AP deasserts chip select + * 11 - - EC processes CS# interrupt and sets up DMA for + * next request + * + * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than + * the following byte values: + * EC_SPI_OLD_READY + * EC_SPI_RX_READY + * EC_SPI_RECEIVING + * EC_SPI_PROCESSING + * + * Then the EC found an error in the request, or was not ready for the request + * and lost data. The AP should give up waiting for EC_SPI_FRAME_START, + * because the EC is unable to tell when the AP is done sending its request. + */ + +/* + * Framing byte which precedes a response packet from the EC. After sending a + * request, the AP will clock in bytes until it sees the framing byte, then + * clock in the response packet. + */ +#define EC_SPI_FRAME_START 0xec + +/* + * Padding bytes which are clocked out after the end of a response packet. + */ +#define EC_SPI_PAST_END 0xed + +/* + * EC is ready to receive, and has ignored the byte sent by the AP. EC expects + * that the AP will send a valid packet header (starting with + * EC_COMMAND_PROTOCOL_3) in the next 32 bytes. + */ +#define EC_SPI_RX_READY 0xf8 + +/* + * EC has started receiving the request from the AP, but hasn't started + * processing it yet. 
+ */ +#define EC_SPI_RECEIVING 0xf9 + +/* EC has received the entire request from the AP and is processing it. */ +#define EC_SPI_PROCESSING 0xfa + +/* + * EC received bad data from the AP, such as a packet header with an invalid + * length. EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_RX_BAD_DATA 0xfb + +/* + * EC received data from the AP before it was ready. That is, the AP asserted + * chip select and started clocking data before the EC was ready to receive it. + * EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_NOT_READY 0xfc + +/* + * EC was ready to receive a request from the AP. EC has treated the byte sent + * by the AP as part of a request packet, or (for old-style ECs) is processing + * a fully received packet but is not ready to respond yet. + */ +#define EC_SPI_OLD_READY 0xfd + +/*****************************************************************************/ + +/* + * Protocol version 2 for I2C and SPI send a request this way: + * + * 0 EC_CMD_VERSION0 + (command version) + * 1 Command number + * 2 Length of params = N + * 3..N+2 Params, if any + * N+3 8-bit checksum of bytes 0..N+2 + * + * The corresponding response is: + * + * 0 Result code (EC_RES_*) + * 1 Length of params = M + * 2..M+1 Params, if any + * M+2 8-bit checksum of bytes 0..M+1 + */ +#define EC_PROTO2_REQUEST_HEADER_BYTES 3 +#define EC_PROTO2_REQUEST_TRAILER_BYTES 1 +#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \ + EC_PROTO2_REQUEST_TRAILER_BYTES) + +#define EC_PROTO2_RESPONSE_HEADER_BYTES 2 +#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1 +#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \ + EC_PROTO2_RESPONSE_TRAILER_BYTES) + +/* Parameter length was limited by the LPC interface */ +#define EC_PROTO2_MAX_PARAM_SIZE 0xfc + +/* Maximum request and response packet sizes for protocol version 2 */ +#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) +#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) + +/*****************************************************************************/ + +/* + * Value written to legacy command port / prefix byte to indicate protocol + * 3+ structs are being used. Usage is bus-dependent. + */ +#define EC_COMMAND_PROTOCOL_3 0xda + +#define EC_HOST_REQUEST_VERSION 3 + +/* Version 3 request from host */ +struct ec_host_request { + /* Struct version (=3) + * + * EC will return EC_RES_INVALID_HEADER if it receives a header with a + * version it doesn't know how to parse. + */ + uint8_t struct_version; + + /* + * Checksum of request and data; sum of all bytes including checksum + * should total to 0. + */ + uint8_t checksum; + + /* Command code */ + uint16_t command; + + /* Command version */ + uint8_t command_version; + + /* Unused byte in current protocol version; set to 0 */ + uint8_t reserved; + + /* Length of data which follows this header */ + uint16_t data_len; +} __packed; + +#define EC_HOST_RESPONSE_VERSION 3 + +/* Version 3 response from EC */ +struct ec_host_response { + /* Struct version (=3) */ + uint8_t struct_version; + + /* + * Checksum of response and data; sum of all bytes including checksum + * should total to 0. 
+ */ + uint8_t checksum; + + /* Result code (EC_RES_*) */ + uint16_t result; + + /* Length of data which follows this header */ + uint16_t data_len; + + /* Unused bytes in current protocol version; set to 0 */ + uint16_t reserved; +} __packed; + +/*****************************************************************************/ /* * Notes on commands: * @@ -418,6 +651,68 @@ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ } __packed; +/* Fake a variety of responses, purely for testing purposes. */ +#define EC_CMD_TEST_PROTOCOL 0x0a + +/* Tell the EC what to send back to us. */ +struct ec_params_test_protocol { + uint32_t ec_result; + uint32_t ret_len; + uint8_t buf[32]; +} __packed; + +/* Here it comes... */ +struct ec_response_test_protocol { + uint8_t buf[32]; +} __packed; + +/* Get prococol information */ +#define EC_CMD_GET_PROTOCOL_INFO 0x0b + +/* Flags for ec_response_get_protocol_info.flags */ +/* EC_RES_IN_PROGRESS may be returned if a command is slow */ +#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) + +struct ec_response_get_protocol_info { + /* Fields which exist if at least protocol version 3 supported */ + + /* Bitmask of protocol versions supported (1 << n means version n)*/ + uint32_t protocol_versions; + + /* Maximum request packet size, in bytes */ + uint16_t max_request_packet_size; + + /* Maximum response packet size, in bytes */ + uint16_t max_response_packet_size; + + /* Flags; see EC_PROTOCOL_INFO_* */ + uint32_t flags; +} __packed; + + +/*****************************************************************************/ +/* Get/Set miscellaneous values */ + +/* The upper byte of .flags tells what to do (nothing means "get") */ +#define EC_GSV_SET 0x80000000 + +/* The lower three bytes of .flags identifies the parameter, if that has + meaning for an individual command. */ +#define EC_GSV_PARAM_MASK 0x00ffffff + +struct ec_params_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +struct ec_response_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +/* More than one command can use these structs to get/set paramters. */ +#define EC_CMD_GSV_PAUSE_IN_S5 0x0c + /*****************************************************************************/ /* Flash commands */ @@ -425,6 +720,7 @@ struct ec_response_get_comms_status { /* Get flash info */ #define EC_CMD_FLASH_INFO 0x10 +/* Version 0 returns these fields */ struct ec_response_flash_info { /* Usable flash size, in bytes */ uint32_t flash_size; @@ -445,6 +741,37 @@ struct ec_response_flash_info { uint32_t protect_block_size; } __packed; +/* Flags for version 1+ flash info command */ +/* EC flash erases bits to 0 instead of 1 */ +#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) + +/* + * Version 1 returns the same initial fields as version 0, with additional + * fields following. + * + * gcc anonymous structs don't seem to get along with the __packed directive; + * if they did we'd define the version 0 struct as a sub-struct of this one. + */ +struct ec_response_flash_info_1 { + /* Version 0 fields; see above for description */ + uint32_t flash_size; + uint32_t write_block_size; + uint32_t erase_block_size; + uint32_t protect_block_size; + + /* Version 1 adds these fields: */ + /* + * Ideal write size in bytes. Writes will be fastest if size is + * exactly this and offset is a multiple of this. For example, an EC + * may have a write buffer which can do half-page operations if data is + * aligned, and a slower word-at-a-time write mode. 
+ */ + uint32_t write_ideal_size; + + /* Flags; see EC_FLASH_INFO_* */ + uint32_t flags; +} __packed; + /* * Read flash * @@ -459,15 +786,15 @@ struct ec_params_flash_read { /* Write flash */ #define EC_CMD_FLASH_WRITE 0x12 +#define EC_VER_FLASH_WRITE 1 + +/* Version 0 of the flash command supported only 64 bytes of data */ +#define EC_FLASH_WRITE_VER0_SIZE 64 struct ec_params_flash_write { uint32_t offset; /* Byte offset to write */ uint32_t size; /* Size to write in bytes */ - /* - * Data to write. Could really use EC_PARAM_SIZE - 8, but tidiest to - * use a power of 2 so writes stay aligned. - */ - uint8_t data[64]; + /* Followed by data to write */ } __packed; /* Erase flash */ @@ -543,7 +870,7 @@ struct ec_response_flash_protect { enum ec_flash_region { /* Region which holds read-only EC image */ - EC_FLASH_REGION_RO, + EC_FLASH_REGION_RO = 0, /* Region which holds rewritable EC image */ EC_FLASH_REGION_RW, /* @@ -551,6 +878,8 @@ enum ec_flash_region { * EC_FLASH_REGION_RO) */ EC_FLASH_REGION_WP_RO, + /* Number of regions */ + EC_FLASH_REGION_COUNT, }; struct ec_params_flash_region_info { @@ -639,15 +968,15 @@ struct rgb_s { */ struct lightbar_params { /* Timing */ - int google_ramp_up; - int google_ramp_down; - int s3s0_ramp_up; - int s0_tick_delay[2]; /* AC=0/1 */ - int s0a_tick_delay[2]; /* AC=0/1 */ - int s0s3_ramp_down; - int s3_sleep_for; - int s3_ramp_up; - int s3_ramp_down; + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; /* Oscillation */ uint8_t new_s0; @@ -676,7 +1005,7 @@ struct ec_params_lightbar { union { struct { /* no args */ - } dump, off, on, init, get_seq, get_params; + } dump, off, on, init, get_seq, get_params, version; struct num { uint8_t num; @@ -710,6 +1039,11 @@ struct ec_response_lightbar { struct lightbar_params get_params; + struct version { + uint32_t num; + uint32_t flags; + } version; + struct { /* no return params */ } off, on, init, brightness, seq, reg, rgb, demo, set_params; @@ -730,9 +1064,61 @@ enum lightbar_command { LIGHTBAR_CMD_DEMO = 9, LIGHTBAR_CMD_GET_PARAMS = 10, LIGHTBAR_CMD_SET_PARAMS = 11, + LIGHTBAR_CMD_VERSION = 12, LIGHTBAR_NUM_CMDS }; +/*****************************************************************************/ +/* LED control commands */ + +#define EC_CMD_LED_CONTROL 0x29 + +enum ec_led_id { + /* LED to indicate battery state of charge */ + EC_LED_ID_BATTERY_LED = 0, + /* + * LED to indicate system power state (on or in suspend). + * May be on power button or on C-panel. + */ + EC_LED_ID_POWER_LED, + /* LED on power adapter or its plug */ + EC_LED_ID_ADAPTER_LED, + + EC_LED_ID_COUNT +}; + +/* LED control flags */ +#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */ +#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */ + +enum ec_led_colors { + EC_LED_COLOR_RED = 0, + EC_LED_COLOR_GREEN, + EC_LED_COLOR_BLUE, + EC_LED_COLOR_YELLOW, + EC_LED_COLOR_WHITE, + + EC_LED_COLOR_COUNT +}; + +struct ec_params_led_control { + uint8_t led_id; /* Which LED to control */ + uint8_t flags; /* Control flags */ + + uint8_t brightness[EC_LED_COLOR_COUNT]; +} __packed; + +struct ec_response_led_control { + /* + * Available brightness value range. + * + * Range 0 means color channel not present. + * Range 1 means on/off control. + * Other values means the LED is control by PWM. 
+ */ + uint8_t brightness_range[EC_LED_COLOR_COUNT]; +} __packed; + /*****************************************************************************/ /* Verified boot commands */ @@ -789,6 +1175,181 @@ enum ec_vboot_hash_status { #define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe #define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd +/*****************************************************************************/ +/* + * Motion sense commands. We'll make separate structs for sub-commands with + * different input args, so that we know how much to expect. + */ +#define EC_CMD_MOTION_SENSE_CMD 0x2B + +/* Motion sense commands */ +enum motionsense_command { + /* + * Dump command returns all motion sensor data including motion sense + * module flags and individual sensor flags. + */ + MOTIONSENSE_CMD_DUMP = 0, + + /* + * Info command returns data describing the details of a given sensor, + * including enum motionsensor_type, enum motionsensor_location, and + * enum motionsensor_chip. + */ + MOTIONSENSE_CMD_INFO = 1, + + /* + * EC Rate command is a setter/getter command for the EC sampling rate + * of all motion sensors in milliseconds. + */ + MOTIONSENSE_CMD_EC_RATE = 2, + + /* + * Sensor ODR command is a setter/getter command for the output data + * rate of a specific motion sensor in millihertz. + */ + MOTIONSENSE_CMD_SENSOR_ODR = 3, + + /* + * Sensor range command is a setter/getter command for the range of + * a specified motion sensor in +/-G's or +/- deg/s. + */ + MOTIONSENSE_CMD_SENSOR_RANGE = 4, + + /* + * Setter/getter command for the keyboard wake angle. When the lid + * angle is greater than this value, keyboard wake is disabled in S3, + * and when the lid angle goes less than this value, keyboard wake is + * enabled. Note, the lid angle measurement is an approximate, + * un-calibrated value, hence the wake angle isn't exact. + */ + MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + + /* Number of motionsense sub-commands. */ + MOTIONSENSE_NUM_CMDS +}; + +enum motionsensor_id { + EC_MOTION_SENSOR_ACCEL_BASE = 0, + EC_MOTION_SENSOR_ACCEL_LID = 1, + EC_MOTION_SENSOR_GYRO = 2, + + /* + * Note, if more sensors are added and this count changes, the padding + * in ec_response_motion_sense dump command must be modified. + */ + EC_MOTION_SENSOR_COUNT = 3 +}; + +/* List of motion sensor types. */ +enum motionsensor_type { + MOTIONSENSE_TYPE_ACCEL = 0, + MOTIONSENSE_TYPE_GYRO = 1, +}; + +/* List of motion sensor locations. */ +enum motionsensor_location { + MOTIONSENSE_LOC_BASE = 0, + MOTIONSENSE_LOC_LID = 1, +}; + +/* List of motion sensor chips. */ +enum motionsensor_chip { + MOTIONSENSE_CHIP_KXCJ9 = 0, +}; + +/* Module flag masks used for the dump sub-command. */ +#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0) + +/* Sensor flag masks used for the dump sub-command. */ +#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0) + +/* + * Send this value for the data element to only perform a read. If you + * send any other value, the EC will interpret it as data to set and will + * return the actual value set. + */ +#define EC_MOTION_SENSE_NO_VALUE -1 + +struct ec_params_motion_sense { + uint8_t cmd; + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* no args */ + } dump; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int16_t data; + } ec_rate, kb_wake_angle; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + /* Should be element of enum motionsensor_id. 
*/ + uint8_t sensor_num; + } info; + + /* + * Used for MOTIONSENSE_CMD_SENSOR_ODR and + * MOTIONSENSE_CMD_SENSOR_RANGE. + */ + struct { + /* Should be element of enum motionsensor_id. */ + uint8_t sensor_num; + + /* Rounding flag, true for round-up, false for down. */ + uint8_t roundup; + + uint16_t reserved; + + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int32_t data; + } sensor_odr, sensor_range; + }; +} __packed; + +struct ec_response_motion_sense { + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* Flags representing the motion sensor module. */ + uint8_t module_flags; + + /* Flags for each sensor in enum motionsensor_id. */ + uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT]; + + /* Array of all sensor data. Each sensor is 3-axis. */ + int16_t data[3*EC_MOTION_SENSOR_COUNT]; + } dump; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + /* Should be element of enum motionsensor_type. */ + uint8_t type; + + /* Should be element of enum motionsensor_location. */ + uint8_t location; + + /* Should be element of enum motionsensor_chip. */ + uint8_t chip; + } info; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, + * MOTIONSENSE_CMD_SENSOR_RANGE, and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Current value of the parameter queried. */ + int32_t ret; + } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + }; +} __packed; + /*****************************************************************************/ /* USB charging control commands */ @@ -868,20 +1429,27 @@ struct ec_response_port80_last_boot { } __packed; /*****************************************************************************/ -/* Thermal engine commands */ +/* Thermal engine commands. Note that there are two implementations. We'll + * reuse the command number, but the data and behavior is incompatible. + * Version 0 is what originally shipped on Link. + * Version 1 separates the CPU thermal limits from the fan control. + */ -/* Set thershold value */ #define EC_CMD_THERMAL_SET_THRESHOLD 0x50 +#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 + +/* The version 0 structs are opaque. You have to know what they are for + * the get/set commands to make any sense. + */ +/* Version 0 - set */ struct ec_params_thermal_set_threshold { uint8_t sensor_type; uint8_t threshold_id; uint16_t value; } __packed; -/* Get threshold value */ -#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 - +/* Version 0 - get */ struct ec_params_thermal_get_threshold { uint8_t sensor_type; uint8_t threshold_id; @@ -891,6 +1459,41 @@ struct ec_response_thermal_get_threshold { uint16_t value; } __packed; + +/* The version 1 structs are visible. */ +enum ec_temp_thresholds { + EC_TEMP_THRESH_WARN = 0, + EC_TEMP_THRESH_HIGH, + EC_TEMP_THRESH_HALT, + + EC_TEMP_THRESH_COUNT +}; + +/* Thermal configuration for one temperature sensor. Temps are in degrees K. + * Zero values will be silently ignored by the thermal task. + */ +struct ec_thermal_config { + uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ + uint32_t temp_fan_off; /* no active cooling needed */ + uint32_t temp_fan_max; /* max active cooling needed */ +} __packed; + +/* Version 1 - get config for one sensor. */ +struct ec_params_thermal_get_threshold_v1 { + uint32_t sensor_num; +} __packed; +/* This returns a struct ec_thermal_config */ + +/* Version 1 - set config for one sensor. + * Use read-modify-write for best results! 
*/ +struct ec_params_thermal_set_threshold_v1 { + uint32_t sensor_num; + struct ec_thermal_config cfg; +} __packed; +/* This returns no data */ + +/****************************************************************************/ + /* Toggle automatic fan control */ #define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52 @@ -920,6 +1523,18 @@ struct ec_params_tmp006_set_calibration { float b2; } __packed; +/* Read raw TMP006 data */ +#define EC_CMD_TMP006_GET_RAW 0x55 + +struct ec_params_tmp006_get_raw { + uint8_t index; +} __packed; + +struct ec_response_tmp006_get_raw { + int32_t t; /* In 1/100 K */ + int32_t v; /* In nV */ +}; + /*****************************************************************************/ /* MKBP - Matrix KeyBoard Protocol */ @@ -1118,11 +1733,41 @@ struct ec_params_switch_enable_backlight { /* Enable/disable WLAN/Bluetooth */ #define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91 +#define EC_VER_SWITCH_ENABLE_WIRELESS 1 -struct ec_params_switch_enable_wireless { +/* Version 0 params; no response */ +struct ec_params_switch_enable_wireless_v0 { uint8_t enabled; } __packed; +/* Version 1 params */ +struct ec_params_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Which flags to copy from now_flags */ + uint8_t now_mask; + + /* + * Flags to leave enabled in S3, if they're on at the S0->S3 + * transition. (Other flags will be disabled by the S0->S3 + * transition.) + */ + uint8_t suspend_flags; + + /* Which flags to copy from suspend_flags */ + uint8_t suspend_mask; +} __packed; + +/* Version 1 response */ +struct ec_response_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Flags to leave enabled in S3 */ + uint8_t suspend_flags; +} __packed; + /*****************************************************************************/ /* GPIO commands. Only available on EC if write protect has been disabled. */ @@ -1147,11 +1792,16 @@ struct ec_response_gpio_get { /*****************************************************************************/ /* I2C commands. Only available when flash write protect is unlocked. */ +/* + * TODO(crosbug.com/p/23570): These commands are deprecated, and will be + * removed soon. Use EC_CMD_I2C_XFER instead. + */ + /* Read I2C bus */ #define EC_CMD_I2C_READ 0x94 struct ec_params_i2c_read { - uint16_t addr; + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t read_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; @@ -1165,7 +1815,7 @@ struct ec_response_i2c_read { struct ec_params_i2c_write { uint16_t data; - uint16_t addr; + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t write_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; @@ -1174,11 +1824,20 @@ struct ec_params_i2c_write { /*****************************************************************************/ /* Charge state commands. Only available when flash write protect unlocked. */ -/* Force charge state machine to stop in idle mode */ -#define EC_CMD_CHARGE_FORCE_IDLE 0x96 +/* Force charge state machine to stop charging the battery or force it to + * discharge the battery. 
+ */ +#define EC_CMD_CHARGE_CONTROL 0x96 +#define EC_VER_CHARGE_CONTROL 1 -struct ec_params_force_idle { - uint8_t enabled; +enum ec_charge_control_mode { + CHARGE_CONTROL_NORMAL = 0, + CHARGE_CONTROL_IDLE, + CHARGE_CONTROL_DISCHARGE, +}; + +struct ec_params_charge_control { + uint32_t mode; /* enum charge_control_mode */ } __packed; /*****************************************************************************/ @@ -1206,14 +1865,231 @@ struct ec_params_force_idle { #define EC_CMD_BATTERY_CUT_OFF 0x99 /*****************************************************************************/ -/* Temporary debug commands. TODO: remove this crosbug.com/p/13849 */ +/* USB port mux control. */ /* - * Dump charge state machine context. - * - * Response is a binary dump of charge state machine context. + * Switch USB mux or return to automatic switching. + */ +#define EC_CMD_USB_MUX 0x9a + +struct ec_params_usb_mux { + uint8_t mux; +} __packed; + +/*****************************************************************************/ +/* LDOs / FETs control. */ + +enum ec_ldo_state { + EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */ + EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */ +}; + +/* + * Switch on/off a LDO. + */ +#define EC_CMD_LDO_SET 0x9b + +struct ec_params_ldo_set { + uint8_t index; + uint8_t state; +} __packed; + +/* + * Get LDO state. + */ +#define EC_CMD_LDO_GET 0x9c + +struct ec_params_ldo_get { + uint8_t index; +} __packed; + +struct ec_response_ldo_get { + uint8_t state; +} __packed; + +/*****************************************************************************/ +/* Power info. */ + +/* + * Get power info. + */ +#define EC_CMD_POWER_INFO 0x9d + +struct ec_response_power_info { + uint32_t usb_dev_type; + uint16_t voltage_ac; + uint16_t voltage_system; + uint16_t current_system; + uint16_t usb_current_limit; +} __packed; + +/*****************************************************************************/ +/* I2C passthru command */ + +#define EC_CMD_I2C_PASSTHRU 0x9e + +/* Slave address is 10 (not 7) bit */ +#define EC_I2C_FLAG_10BIT (1 << 16) + +/* Read data; if not present, message is a write */ +#define EC_I2C_FLAG_READ (1 << 15) + +/* Mask for address */ +#define EC_I2C_ADDR_MASK 0x3ff + +#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */ +#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */ + +/* Any error */ +#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) + +struct ec_params_i2c_passthru_msg { + uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ + uint16_t len; /* Number of bytes to read or write */ +} __packed; + +struct ec_params_i2c_passthru { + uint8_t port; /* I2C port number */ + uint8_t num_msgs; /* Number of messages */ + struct ec_params_i2c_passthru_msg msg[]; + /* Data to write for all messages is concatenated here */ +} __packed; + +struct ec_response_i2c_passthru { + uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) 
*/ + uint8_t num_msgs; /* Number of messages processed */ + uint8_t data[]; /* Data read by messages concatenated here */ +} __packed; + +/*****************************************************************************/ +/* Power button hang detect */ + +#define EC_CMD_HANG_DETECT 0x9f + +/* Reasons to start hang detection timer */ +/* Power button pressed */ +#define EC_HANG_START_ON_POWER_PRESS (1 << 0) + +/* Lid closed */ +#define EC_HANG_START_ON_LID_CLOSE (1 << 1) + + /* Lid opened */ +#define EC_HANG_START_ON_LID_OPEN (1 << 2) + +/* Start of AP S3->S0 transition (booting or resuming from suspend) */ +#define EC_HANG_START_ON_RESUME (1 << 3) + +/* Reasons to cancel hang detection */ + +/* Power button released */ +#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8) + +/* Any host command from AP received */ +#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9) + +/* Stop on end of AP S0->S3 transition (suspending or shutting down) */ +#define EC_HANG_STOP_ON_SUSPEND (1 << 10) + +/* + * If this flag is set, all the other fields are ignored, and the hang detect + * timer is started. This provides the AP a way to start the hang timer + * without reconfiguring any of the other hang detect settings. Note that + * you must previously have configured the timeouts. + */ +#define EC_HANG_START_NOW (1 << 30) + +/* + * If this flag is set, all the other fields are ignored (including + * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer + * without reconfiguring any of the other hang detect settings. */ -#define EC_CMD_CHARGE_DUMP 0xa0 +#define EC_HANG_STOP_NOW (1 << 31) + +struct ec_params_hang_detect { + /* Flags; see EC_HANG_* */ + uint32_t flags; + + /* Timeout in msec before generating host event, if enabled */ + uint16_t host_event_timeout_msec; + + /* Timeout in msec before generating warm reboot, if enabled */ + uint16_t warm_reboot_timeout_msec; +} __packed; + +/*****************************************************************************/ +/* Commands for battery charging */ + +/* + * This is the single catch-all host command to exchange data regarding the + * charge state machine (v2 and up). + */ +#define EC_CMD_CHARGE_STATE 0xa0 + +/* Subcommands for this host command */ +enum charge_state_command { + CHARGE_STATE_CMD_GET_STATE, + CHARGE_STATE_CMD_GET_PARAM, + CHARGE_STATE_CMD_SET_PARAM, + CHARGE_STATE_NUM_CMDS +}; + +/* + * Known param numbers are defined here. Ranges are reserved for board-specific + * params, which are handled by the particular implementations. + */ +enum charge_state_params { + CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */ + CS_PARAM_CHG_CURRENT, /* charger current limit */ + CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ + CS_PARAM_CHG_STATUS, /* charger-specific status */ + CS_PARAM_CHG_OPTION, /* charger-specific options */ + /* How many so far? */ + CS_NUM_BASE_PARAMS, + + /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */ + CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, + CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, + + /* Other custom param ranges go here... 
*/ +}; + +struct ec_params_charge_state { + uint8_t cmd; /* enum charge_state_command */ + union { + struct { + /* no args */ + } get_state; + + struct { + uint32_t param; /* enum charge_state_param */ + } get_param; + + struct { + uint32_t param; /* param to set */ + uint32_t value; /* value to set */ + } set_param; + }; +} __packed; + +struct ec_response_charge_state { + union { + struct { + int ac; + int chg_voltage; + int chg_current; + int chg_input_current; + int batt_state_of_charge; + } get_state; + + struct { + uint32_t value; + } get_param; + struct { + /* no return values */ + } set_param; + }; +} __packed; + /* * Set maximum battery charging current. @@ -1221,15 +2097,59 @@ struct ec_params_force_idle { #define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1 struct ec_params_current_limit { - uint32_t limit; + uint32_t limit; /* in mA */ +} __packed; + +/* + * Set maximum external power current. + */ +#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2 + +struct ec_params_ext_power_current_limit { + uint32_t limit; /* in mA */ +} __packed; + +/*****************************************************************************/ +/* Smart battery pass-through */ + +/* Get / Set 16-bit smart battery registers */ +#define EC_CMD_SB_READ_WORD 0xb0 +#define EC_CMD_SB_WRITE_WORD 0xb1 + +/* Get / Set string smart battery parameters + * formatted as SMBUS "block". + */ +#define EC_CMD_SB_READ_BLOCK 0xb2 +#define EC_CMD_SB_WRITE_BLOCK 0xb3 + +struct ec_params_sb_rd { + uint8_t reg; +} __packed; + +struct ec_response_sb_rd_word { + uint16_t value; +} __packed; + +struct ec_params_sb_wr_word { + uint8_t reg; + uint16_t value; +} __packed; + +struct ec_response_sb_rd_block { + uint8_t data[32]; +} __packed; + +struct ec_params_sb_wr_block { + uint8_t reg; + uint16_t data[32]; } __packed; /*****************************************************************************/ /* System commands */ /* - * TODO: this is a confusing name, since it doesn't necessarily reboot the EC. - * Rename to "set image" or something similar. + * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't + * necessarily reboot the EC. Rename to "image" or something similar? */ #define EC_CMD_REBOOT_EC 0xd2 @@ -1308,6 +2228,7 @@ struct ec_params_reboot_ec { #define EC_CMD_ACPI_QUERY_EVENT 0x84 /* Valid addresses in ACPI memory space, for read/write commands */ + /* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ #define EC_ACPI_MEM_VERSION 0x00 /* @@ -1317,8 +2238,60 @@ struct ec_params_reboot_ec { #define EC_ACPI_MEM_TEST 0x01 /* Test compliment; writes here are ignored. */ #define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 + /* Keyboard backlight brightness percent (0 - 100) */ #define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 +/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ +#define EC_ACPI_MEM_FAN_DUTY 0x04 + +/* + * DPTF temp thresholds. Any of the EC's temp sensors can have up to two + * independent thresholds attached to them. The current value of the ID + * register determines which sensor is affected by the THRESHOLD and COMMIT + * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme + * as the memory-mapped sensors. The COMMIT register applies those settings. + * + * The spec does not mandate any way to read back the threshold settings + * themselves, but when a threshold is crossed the AP needs a way to determine + * which sensor(s) are responsible. 
Each reading of the ID register clears and + * returns one sensor ID that has crossed one of its threshold (in either + * direction) since the last read. A value of 0xFF means "no new thresholds + * have tripped". Setting or enabling the thresholds for a sensor will clear + * the unread event count for that sensor. + */ +#define EC_ACPI_MEM_TEMP_ID 0x05 +#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 +#define EC_ACPI_MEM_TEMP_COMMIT 0x07 +/* + * Here are the bits for the COMMIT register: + * bit 0 selects the threshold index for the chosen sensor (0/1) + * bit 1 enables/disables the selected threshold (0 = off, 1 = on) + * Each write to the commit register affects one threshold. + */ +#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0) +#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1) +/* + * Example: + * + * Set the thresholds for sensor 2 to 50 C and 60 C: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET + * write 0x2 to [0x07] -- enable threshold 0 with this value + * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET + * write 0x3 to [0x07] -- enable threshold 1 with this value + * + * Disable the 60 C threshold, leaving the 50 C threshold unchanged: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x1 to [0x07] -- disable threshold 1 + */ + +/* DPTF battery charging current limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 + +/* Charging limit is specified in 64 mA steps */ +#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 +/* Value to disable DPTF battery charging limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff /* Current version of ACPI memory address space */ #define EC_ACPI_MEM_VERSION_CURRENT 1 @@ -1360,10 +2333,21 @@ struct ec_params_reboot_ec { * Header bytes greater than this indicate a later version. For example, * EC_CMD_VERSION0 + 1 means we are using version 1. * - * The old EC interface must not use commands 0dc or higher. + * The old EC interface must not use commands 0xdc or higher. */ #define EC_CMD_VERSION0 0xdc #endif /* !__ACPI__ */ +/*****************************************************************************/ +/* + * Deprecated constants. These constants have been renamed for clarity. The + * meaning and size has not changed. Programs that use the old names should + * switch to the new names soon, as the old names may not be carried forward + * forever. + */ +#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE +#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 +#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE + #endif /* __CROS_EC_COMMANDS_H */ -- cgit v1.2.3 From a9cd92acabcb8aca8431647005ed868b8c7644c9 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Tue, 20 May 2014 13:48:51 +0100 Subject: mfd: arizona: Correct addresses of always-on trigger registers Update the addresses and names to match current silicon. The WM8997 regmap tables have been adjusted to match the new names. Missing registers have been added to WM5110 default value table. 
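As an illustrative sketch only (not part of this patch), the effect of the address shift is easiest to see from a consumer's side. Assuming a child driver that already holds a struct arizona * with a valid regmap pointer, as the Arizona MFD children do, a read of the first always-on trigger sequence register now goes to 0x66; with the old header the same macro pointed at 0x68, which this patch renames to SELECT_3:

#include <linux/regmap.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>

/*
 * Illustrative helper, not in the patch: read back the first always-on
 * trigger sequence select register at its corrected address (0x66).
 */
static int example_read_aot_select1(struct arizona *arizona, unsigned int *val)
{
	return regmap_read(arizona->regmap,
			   ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1, val);
}
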
Signed-off-by: Richard Fitzgerald Signed-off-by: Lee Jones --- drivers/mfd/wm5102-tables.c | 2 ++ drivers/mfd/wm5110-tables.c | 12 ++++++++---- drivers/mfd/wm8997-tables.c | 12 ++++++------ include/linux/mfd/arizona/registers.h | 14 ++++++++------ 4 files changed, 24 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c index 070f8cfbbd7a..ada3286c68e9 100644 --- a/drivers/mfd/wm5102-tables.c +++ b/drivers/mfd/wm5102-tables.c @@ -1037,6 +1037,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg) case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8: case ARIZONA_COMFORT_NOISE_GENERATOR: case ARIZONA_HAPTICS_CONTROL_1: case ARIZONA_HAPTICS_CONTROL_2: diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 58bc2be6e6a5..41a7f6fb7802 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c @@ -468,10 +468,12 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */ - { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */ - { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */ - { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */ - { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */ + { 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */ + { 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */ + { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */ + { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */ + { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */ + { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */ @@ -1499,6 +1501,8 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6: case ARIZONA_COMFORT_NOISE_GENERATOR: case ARIZONA_HAPTICS_CONTROL_1: case ARIZONA_HAPTICS_CONTROL_2: diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c index c9c65197bb69..c7a81da64ee1 100644 --- a/drivers/mfd/wm8997-tables.c +++ b/drivers/mfd/wm8997-tables.c @@ -174,10 +174,10 @@ static const struct reg_default wm8997_reg_default[] = { { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */ - { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */ - { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */ - { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */ - { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers 
Sequence Select 4 */ + { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */ + { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */ + { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */ + { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */ @@ -814,10 +814,10 @@ static bool wm8997_readable_register(struct device *dev, unsigned int reg) case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2: case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3: case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4: - case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1: - case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3: case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5: + case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6: case ARIZONA_COMFORT_NOISE_GENERATOR: case ARIZONA_HAPTICS_CONTROL_1: case ARIZONA_HAPTICS_CONTROL_2: diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index 7b35c21170d5..7204d8138b24 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -42,12 +42,14 @@ #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64 -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x68 -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x69 -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x6A -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x6B -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6C -#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6D +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x68 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x69 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6A +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6B +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7 0x6C +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8 0x6D #define ARIZONA_COMFORT_NOISE_GENERATOR 0x70 #define ARIZONA_HAPTICS_CONTROL_1 0x90 #define ARIZONA_HAPTICS_CONTROL_2 0x91 -- cgit v1.2.3 From a22c514c6a8bf21663b2e0a5339cc461be2f01a2 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 22 May 2014 09:50:57 +0100 Subject: mfd: abx500-core: Remove unused function abx500_dump_all_banks() abx500_dump_all_banks() has no callers in the kernel, so it's probably safe to remove it. 
Cc: Linus Walleij Signed-off-by: Lee Jones --- drivers/mfd/abx500-core.c | 20 -------------------- include/linux/mfd/abx500.h | 1 - 2 files changed, 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c index d6d0ec4d21e4..fe418995108c 100644 --- a/drivers/mfd/abx500-core.c +++ b/drivers/mfd/abx500-core.c @@ -151,26 +151,6 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq) } EXPORT_SYMBOL(abx500_startup_irq_enabled); -int abx500_dump_all_banks(void) -{ - struct abx500_ops *ops; - struct device *dummy_child; - struct abx500_device_entry *dev_entry; - - dummy_child = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!dummy_child) - return -ENOMEM; - list_for_each_entry(dev_entry, &abx500_list, list) { - dummy_child->parent = dev_entry->dev; - ops = &dev_entry->ops; - - if ((ops != NULL) && (ops->dump_all_banks != NULL)) - ops->dump_all_banks(dummy_child); - } - kfree(dummy_child); -} -EXPORT_SYMBOL(abx500_dump_all_banks); - MODULE_AUTHOR("Mattias Wallin "); MODULE_DESCRIPTION("ABX500 core driver"); MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index df2508f7f3d2..552cc1d61cc7 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -330,7 +330,6 @@ int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, int abx500_get_chip_id(struct device *dev); int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); int abx500_startup_irq_enabled(struct device *dev, unsigned int irq); -int abx500_dump_all_banks(void); struct abx500_ops { int (*get_chip_id) (struct device *); -- cgit v1.2.3 From 45ac60c0bc93f64c3fe64de8308c8e4bd67ac165 Mon Sep 17 00:00:00 2001 From: Keerthy Date: Thu, 22 May 2014 14:48:30 +0530 Subject: mfd: palmas: Format the header file Formats the palmas header file. Convert all the offset values to hexadecimal. 
Signed-off-by: Keerthy Signed-off-by: Lee Jones --- include/linux/mfd/palmas.h | 2166 ++++++++++++++++++++++---------------------- 1 file changed, 1083 insertions(+), 1083 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 9974e387e483..ccbb21f718ed 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -482,10 +482,10 @@ enum usb_irq_events { /* helper macro to get correct slave number */ #define PALMAS_BASE_TO_SLAVE(x) ((x >> 8) - 1) -#define PALMAS_BASE_TO_REG(x, y) ((x & 0xff) + y) +#define PALMAS_BASE_TO_REG(x, y) ((x & 0xFF) + y) /* Base addresses of IP blocks in Palmas */ -#define PALMAS_SMPS_DVS_BASE 0x20 +#define PALMAS_SMPS_DVS_BASE 0x020 #define PALMAS_RTC_BASE 0x100 #define PALMAS_VALIDITY_BASE 0x118 #define PALMAS_SMPS_BASE 0x120 @@ -504,19 +504,19 @@ enum usb_irq_events { #define PALMAS_TRIM_GPADC_BASE 0x3CD /* Registers for function RTC */ -#define PALMAS_SECONDS_REG 0x0 -#define PALMAS_MINUTES_REG 0x1 -#define PALMAS_HOURS_REG 0x2 -#define PALMAS_DAYS_REG 0x3 -#define PALMAS_MONTHS_REG 0x4 -#define PALMAS_YEARS_REG 0x5 -#define PALMAS_WEEKS_REG 0x6 -#define PALMAS_ALARM_SECONDS_REG 0x8 -#define PALMAS_ALARM_MINUTES_REG 0x9 -#define PALMAS_ALARM_HOURS_REG 0xA -#define PALMAS_ALARM_DAYS_REG 0xB -#define PALMAS_ALARM_MONTHS_REG 0xC -#define PALMAS_ALARM_YEARS_REG 0xD +#define PALMAS_SECONDS_REG 0x00 +#define PALMAS_MINUTES_REG 0x01 +#define PALMAS_HOURS_REG 0x02 +#define PALMAS_DAYS_REG 0x03 +#define PALMAS_MONTHS_REG 0x04 +#define PALMAS_YEARS_REG 0x05 +#define PALMAS_WEEKS_REG 0x06 +#define PALMAS_ALARM_SECONDS_REG 0x08 +#define PALMAS_ALARM_MINUTES_REG 0x09 +#define PALMAS_ALARM_HOURS_REG 0x0A +#define PALMAS_ALARM_DAYS_REG 0x0B +#define PALMAS_ALARM_MONTHS_REG 0x0C +#define PALMAS_ALARM_YEARS_REG 0x0D #define PALMAS_RTC_CTRL_REG 0x10 #define PALMAS_RTC_STATUS_REG 0x11 #define PALMAS_RTC_INTERRUPTS_REG 0x12 @@ -527,201 +527,201 @@ enum usb_irq_events { /* Bit definitions for SECONDS_REG */ #define PALMAS_SECONDS_REG_SEC1_MASK 0x70 -#define PALMAS_SECONDS_REG_SEC1_SHIFT 4 -#define PALMAS_SECONDS_REG_SEC0_MASK 0x0f -#define PALMAS_SECONDS_REG_SEC0_SHIFT 0 +#define PALMAS_SECONDS_REG_SEC1_SHIFT 0x04 +#define PALMAS_SECONDS_REG_SEC0_MASK 0x0F +#define PALMAS_SECONDS_REG_SEC0_SHIFT 0x00 /* Bit definitions for MINUTES_REG */ #define PALMAS_MINUTES_REG_MIN1_MASK 0x70 -#define PALMAS_MINUTES_REG_MIN1_SHIFT 4 -#define PALMAS_MINUTES_REG_MIN0_MASK 0x0f -#define PALMAS_MINUTES_REG_MIN0_SHIFT 0 +#define PALMAS_MINUTES_REG_MIN1_SHIFT 0x04 +#define PALMAS_MINUTES_REG_MIN0_MASK 0x0F +#define PALMAS_MINUTES_REG_MIN0_SHIFT 0x00 /* Bit definitions for HOURS_REG */ #define PALMAS_HOURS_REG_PM_NAM 0x80 -#define PALMAS_HOURS_REG_PM_NAM_SHIFT 7 +#define PALMAS_HOURS_REG_PM_NAM_SHIFT 0x07 #define PALMAS_HOURS_REG_HOUR1_MASK 0x30 -#define PALMAS_HOURS_REG_HOUR1_SHIFT 4 -#define PALMAS_HOURS_REG_HOUR0_MASK 0x0f -#define PALMAS_HOURS_REG_HOUR0_SHIFT 0 +#define PALMAS_HOURS_REG_HOUR1_SHIFT 0x04 +#define PALMAS_HOURS_REG_HOUR0_MASK 0x0F +#define PALMAS_HOURS_REG_HOUR0_SHIFT 0x00 /* Bit definitions for DAYS_REG */ #define PALMAS_DAYS_REG_DAY1_MASK 0x30 -#define PALMAS_DAYS_REG_DAY1_SHIFT 4 -#define PALMAS_DAYS_REG_DAY0_MASK 0x0f -#define PALMAS_DAYS_REG_DAY0_SHIFT 0 +#define PALMAS_DAYS_REG_DAY1_SHIFT 0x04 +#define PALMAS_DAYS_REG_DAY0_MASK 0x0F +#define PALMAS_DAYS_REG_DAY0_SHIFT 0x00 /* Bit definitions for MONTHS_REG */ #define PALMAS_MONTHS_REG_MONTH1 0x10 -#define PALMAS_MONTHS_REG_MONTH1_SHIFT 4 -#define 
PALMAS_MONTHS_REG_MONTH0_MASK 0x0f -#define PALMAS_MONTHS_REG_MONTH0_SHIFT 0 +#define PALMAS_MONTHS_REG_MONTH1_SHIFT 0x04 +#define PALMAS_MONTHS_REG_MONTH0_MASK 0x0F +#define PALMAS_MONTHS_REG_MONTH0_SHIFT 0x00 /* Bit definitions for YEARS_REG */ #define PALMAS_YEARS_REG_YEAR1_MASK 0xf0 -#define PALMAS_YEARS_REG_YEAR1_SHIFT 4 -#define PALMAS_YEARS_REG_YEAR0_MASK 0x0f -#define PALMAS_YEARS_REG_YEAR0_SHIFT 0 +#define PALMAS_YEARS_REG_YEAR1_SHIFT 0x04 +#define PALMAS_YEARS_REG_YEAR0_MASK 0x0F +#define PALMAS_YEARS_REG_YEAR0_SHIFT 0x00 /* Bit definitions for WEEKS_REG */ #define PALMAS_WEEKS_REG_WEEK_MASK 0x07 -#define PALMAS_WEEKS_REG_WEEK_SHIFT 0 +#define PALMAS_WEEKS_REG_WEEK_SHIFT 0x00 /* Bit definitions for ALARM_SECONDS_REG */ #define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_MASK 0x70 -#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_SHIFT 4 -#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_MASK 0x0f -#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_SHIFT 0 +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_SHIFT 0x04 +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_MASK 0x0F +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_SHIFT 0x00 /* Bit definitions for ALARM_MINUTES_REG */ #define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_MASK 0x70 -#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_SHIFT 4 -#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_MASK 0x0f -#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_SHIFT 0 +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_SHIFT 0x04 +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_MASK 0x0F +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_SHIFT 0x00 /* Bit definitions for ALARM_HOURS_REG */ #define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM 0x80 -#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM_SHIFT 7 +#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM_SHIFT 0x07 #define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_MASK 0x30 -#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_SHIFT 4 -#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_MASK 0x0f -#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_SHIFT 0 +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_SHIFT 0x04 +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_MASK 0x0F +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_SHIFT 0x00 /* Bit definitions for ALARM_DAYS_REG */ #define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_MASK 0x30 -#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_SHIFT 4 -#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_MASK 0x0f -#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_SHIFT 0 +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_SHIFT 0x04 +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_MASK 0x0F +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_SHIFT 0x00 /* Bit definitions for ALARM_MONTHS_REG */ #define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1 0x10 -#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1_SHIFT 4 -#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_MASK 0x0f -#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_SHIFT 0 +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1_SHIFT 0x04 +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_MASK 0x0F +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_SHIFT 0x00 /* Bit definitions for ALARM_YEARS_REG */ #define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_MASK 0xf0 -#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_SHIFT 4 -#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_MASK 0x0f -#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_SHIFT 0 +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_SHIFT 0x04 +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_MASK 0x0F +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_SHIFT 0x00 /* Bit definitions for RTC_CTRL_REG */ #define PALMAS_RTC_CTRL_REG_RTC_V_OPT 0x80 -#define PALMAS_RTC_CTRL_REG_RTC_V_OPT_SHIFT 7 +#define PALMAS_RTC_CTRL_REG_RTC_V_OPT_SHIFT 0x07 #define 
PALMAS_RTC_CTRL_REG_GET_TIME 0x40 -#define PALMAS_RTC_CTRL_REG_GET_TIME_SHIFT 6 +#define PALMAS_RTC_CTRL_REG_GET_TIME_SHIFT 0x06 #define PALMAS_RTC_CTRL_REG_SET_32_COUNTER 0x20 -#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER_SHIFT 5 +#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER_SHIFT 0x05 #define PALMAS_RTC_CTRL_REG_TEST_MODE 0x10 -#define PALMAS_RTC_CTRL_REG_TEST_MODE_SHIFT 4 +#define PALMAS_RTC_CTRL_REG_TEST_MODE_SHIFT 0x04 #define PALMAS_RTC_CTRL_REG_MODE_12_24 0x08 -#define PALMAS_RTC_CTRL_REG_MODE_12_24_SHIFT 3 +#define PALMAS_RTC_CTRL_REG_MODE_12_24_SHIFT 0x03 #define PALMAS_RTC_CTRL_REG_AUTO_COMP 0x04 -#define PALMAS_RTC_CTRL_REG_AUTO_COMP_SHIFT 2 +#define PALMAS_RTC_CTRL_REG_AUTO_COMP_SHIFT 0x02 #define PALMAS_RTC_CTRL_REG_ROUND_30S 0x02 -#define PALMAS_RTC_CTRL_REG_ROUND_30S_SHIFT 1 +#define PALMAS_RTC_CTRL_REG_ROUND_30S_SHIFT 0x01 #define PALMAS_RTC_CTRL_REG_STOP_RTC 0x01 -#define PALMAS_RTC_CTRL_REG_STOP_RTC_SHIFT 0 +#define PALMAS_RTC_CTRL_REG_STOP_RTC_SHIFT 0x00 /* Bit definitions for RTC_STATUS_REG */ #define PALMAS_RTC_STATUS_REG_POWER_UP 0x80 -#define PALMAS_RTC_STATUS_REG_POWER_UP_SHIFT 7 +#define PALMAS_RTC_STATUS_REG_POWER_UP_SHIFT 0x07 #define PALMAS_RTC_STATUS_REG_ALARM 0x40 -#define PALMAS_RTC_STATUS_REG_ALARM_SHIFT 6 +#define PALMAS_RTC_STATUS_REG_ALARM_SHIFT 0x06 #define PALMAS_RTC_STATUS_REG_EVENT_1D 0x20 -#define PALMAS_RTC_STATUS_REG_EVENT_1D_SHIFT 5 +#define PALMAS_RTC_STATUS_REG_EVENT_1D_SHIFT 0x05 #define PALMAS_RTC_STATUS_REG_EVENT_1H 0x10 -#define PALMAS_RTC_STATUS_REG_EVENT_1H_SHIFT 4 +#define PALMAS_RTC_STATUS_REG_EVENT_1H_SHIFT 0x04 #define PALMAS_RTC_STATUS_REG_EVENT_1M 0x08 -#define PALMAS_RTC_STATUS_REG_EVENT_1M_SHIFT 3 +#define PALMAS_RTC_STATUS_REG_EVENT_1M_SHIFT 0x03 #define PALMAS_RTC_STATUS_REG_EVENT_1S 0x04 -#define PALMAS_RTC_STATUS_REG_EVENT_1S_SHIFT 2 +#define PALMAS_RTC_STATUS_REG_EVENT_1S_SHIFT 0x02 #define PALMAS_RTC_STATUS_REG_RUN 0x02 -#define PALMAS_RTC_STATUS_REG_RUN_SHIFT 1 +#define PALMAS_RTC_STATUS_REG_RUN_SHIFT 0x01 /* Bit definitions for RTC_INTERRUPTS_REG */ #define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN 0x10 -#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN_SHIFT 4 +#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN_SHIFT 0x04 #define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM 0x08 -#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM_SHIFT 3 +#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM_SHIFT 0x03 #define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER 0x04 -#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER_SHIFT 2 +#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER_SHIFT 0x02 #define PALMAS_RTC_INTERRUPTS_REG_EVERY_MASK 0x03 -#define PALMAS_RTC_INTERRUPTS_REG_EVERY_SHIFT 0 +#define PALMAS_RTC_INTERRUPTS_REG_EVERY_SHIFT 0x00 /* Bit definitions for RTC_COMP_LSB_REG */ -#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_MASK 0xff -#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_SHIFT 0 +#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_MASK 0xFF +#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_SHIFT 0x00 /* Bit definitions for RTC_COMP_MSB_REG */ -#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_MASK 0xff -#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_SHIFT 0 +#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_MASK 0xFF +#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_SHIFT 0x00 /* Bit definitions for RTC_RES_PROG_REG */ -#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_MASK 0x3f -#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_SHIFT 0 +#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_MASK 0x3F +#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_SHIFT 0x00 /* Bit definitions for RTC_RESET_STATUS_REG */ #define 
PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS 0x01 -#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS_SHIFT 0 +#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS_SHIFT 0x00 /* Registers for function BACKUP */ -#define PALMAS_BACKUP0 0x0 -#define PALMAS_BACKUP1 0x1 -#define PALMAS_BACKUP2 0x2 -#define PALMAS_BACKUP3 0x3 -#define PALMAS_BACKUP4 0x4 -#define PALMAS_BACKUP5 0x5 -#define PALMAS_BACKUP6 0x6 -#define PALMAS_BACKUP7 0x7 +#define PALMAS_BACKUP0 0x00 +#define PALMAS_BACKUP1 0x01 +#define PALMAS_BACKUP2 0x02 +#define PALMAS_BACKUP3 0x03 +#define PALMAS_BACKUP4 0x04 +#define PALMAS_BACKUP5 0x05 +#define PALMAS_BACKUP6 0x06 +#define PALMAS_BACKUP7 0x07 /* Bit definitions for BACKUP0 */ -#define PALMAS_BACKUP0_BACKUP_MASK 0xff -#define PALMAS_BACKUP0_BACKUP_SHIFT 0 +#define PALMAS_BACKUP0_BACKUP_MASK 0xFF +#define PALMAS_BACKUP0_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP1 */ -#define PALMAS_BACKUP1_BACKUP_MASK 0xff -#define PALMAS_BACKUP1_BACKUP_SHIFT 0 +#define PALMAS_BACKUP1_BACKUP_MASK 0xFF +#define PALMAS_BACKUP1_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP2 */ -#define PALMAS_BACKUP2_BACKUP_MASK 0xff -#define PALMAS_BACKUP2_BACKUP_SHIFT 0 +#define PALMAS_BACKUP2_BACKUP_MASK 0xFF +#define PALMAS_BACKUP2_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP3 */ -#define PALMAS_BACKUP3_BACKUP_MASK 0xff -#define PALMAS_BACKUP3_BACKUP_SHIFT 0 +#define PALMAS_BACKUP3_BACKUP_MASK 0xFF +#define PALMAS_BACKUP3_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP4 */ -#define PALMAS_BACKUP4_BACKUP_MASK 0xff -#define PALMAS_BACKUP4_BACKUP_SHIFT 0 +#define PALMAS_BACKUP4_BACKUP_MASK 0xFF +#define PALMAS_BACKUP4_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP5 */ -#define PALMAS_BACKUP5_BACKUP_MASK 0xff -#define PALMAS_BACKUP5_BACKUP_SHIFT 0 +#define PALMAS_BACKUP5_BACKUP_MASK 0xFF +#define PALMAS_BACKUP5_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP6 */ -#define PALMAS_BACKUP6_BACKUP_MASK 0xff -#define PALMAS_BACKUP6_BACKUP_SHIFT 0 +#define PALMAS_BACKUP6_BACKUP_MASK 0xFF +#define PALMAS_BACKUP6_BACKUP_SHIFT 0x00 /* Bit definitions for BACKUP7 */ -#define PALMAS_BACKUP7_BACKUP_MASK 0xff -#define PALMAS_BACKUP7_BACKUP_SHIFT 0 +#define PALMAS_BACKUP7_BACKUP_MASK 0xFF +#define PALMAS_BACKUP7_BACKUP_SHIFT 0x00 /* Registers for function SMPS */ -#define PALMAS_SMPS12_CTRL 0x0 -#define PALMAS_SMPS12_TSTEP 0x1 -#define PALMAS_SMPS12_FORCE 0x2 -#define PALMAS_SMPS12_VOLTAGE 0x3 -#define PALMAS_SMPS3_CTRL 0x4 -#define PALMAS_SMPS3_VOLTAGE 0x7 -#define PALMAS_SMPS45_CTRL 0x8 -#define PALMAS_SMPS45_TSTEP 0x9 -#define PALMAS_SMPS45_FORCE 0xA -#define PALMAS_SMPS45_VOLTAGE 0xB -#define PALMAS_SMPS6_CTRL 0xC -#define PALMAS_SMPS6_TSTEP 0xD -#define PALMAS_SMPS6_FORCE 0xE -#define PALMAS_SMPS6_VOLTAGE 0xF +#define PALMAS_SMPS12_CTRL 0x00 +#define PALMAS_SMPS12_TSTEP 0x01 +#define PALMAS_SMPS12_FORCE 0x02 +#define PALMAS_SMPS12_VOLTAGE 0x03 +#define PALMAS_SMPS3_CTRL 0x04 +#define PALMAS_SMPS3_VOLTAGE 0x07 +#define PALMAS_SMPS45_CTRL 0x08 +#define PALMAS_SMPS45_TSTEP 0x09 +#define PALMAS_SMPS45_FORCE 0x0A +#define PALMAS_SMPS45_VOLTAGE 0x0B +#define PALMAS_SMPS6_CTRL 0x0C +#define PALMAS_SMPS6_TSTEP 0x0D +#define PALMAS_SMPS6_FORCE 0x0E +#define PALMAS_SMPS6_VOLTAGE 0x0F #define PALMAS_SMPS7_CTRL 0x10 #define PALMAS_SMPS7_VOLTAGE 0x13 #define PALMAS_SMPS8_CTRL 0x14 @@ -744,303 +744,303 @@ enum usb_irq_events { /* Bit definitions for SMPS12_CTRL */ #define PALMAS_SMPS12_CTRL_WR_S 0x80 -#define PALMAS_SMPS12_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS12_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN 0x40 
-#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN_SHIFT 6 +#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 #define PALMAS_SMPS12_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS12_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS12_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS12_TSTEP */ #define PALMAS_SMPS12_TSTEP_TSTEP_MASK 0x03 -#define PALMAS_SMPS12_TSTEP_TSTEP_SHIFT 0 +#define PALMAS_SMPS12_TSTEP_TSTEP_SHIFT 0x00 /* Bit definitions for SMPS12_FORCE */ #define PALMAS_SMPS12_FORCE_CMD 0x80 -#define PALMAS_SMPS12_FORCE_CMD_SHIFT 7 -#define PALMAS_SMPS12_FORCE_VSEL_MASK 0x7f -#define PALMAS_SMPS12_FORCE_VSEL_SHIFT 0 +#define PALMAS_SMPS12_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS12_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS12_FORCE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS12_VOLTAGE */ #define PALMAS_SMPS12_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS12_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS12_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS12_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS12_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS3_CTRL */ #define PALMAS_SMPS3_CTRL_WR_S 0x80 -#define PALMAS_SMPS3_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS3_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS3_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS3_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS3_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS3_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS3_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS3_VOLTAGE */ #define PALMAS_SMPS3_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS3_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS3_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS3_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS3_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS3_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS3_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS45_CTRL */ #define PALMAS_SMPS45_CTRL_WR_S 0x80 -#define PALMAS_SMPS45_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS45_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN 0x40 -#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN_SHIFT 6 +#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 #define PALMAS_SMPS45_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS45_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS45_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS45_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS45_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS45_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS45_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS45_TSTEP */ #define PALMAS_SMPS45_TSTEP_TSTEP_MASK 0x03 -#define PALMAS_SMPS45_TSTEP_TSTEP_SHIFT 0 +#define PALMAS_SMPS45_TSTEP_TSTEP_SHIFT 0x00 /* Bit definitions for SMPS45_FORCE */ #define PALMAS_SMPS45_FORCE_CMD 0x80 -#define PALMAS_SMPS45_FORCE_CMD_SHIFT 7 -#define PALMAS_SMPS45_FORCE_VSEL_MASK 0x7f -#define PALMAS_SMPS45_FORCE_VSEL_SHIFT 0 +#define PALMAS_SMPS45_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS45_FORCE_VSEL_MASK 0x7F +#define 
PALMAS_SMPS45_FORCE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS45_VOLTAGE */ #define PALMAS_SMPS45_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS45_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS45_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS45_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS45_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS45_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS45_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS6_CTRL */ #define PALMAS_SMPS6_CTRL_WR_S 0x80 -#define PALMAS_SMPS6_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS6_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN 0x40 -#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN_SHIFT 6 +#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 #define PALMAS_SMPS6_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS6_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS6_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS6_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS6_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS6_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS6_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS6_TSTEP */ #define PALMAS_SMPS6_TSTEP_TSTEP_MASK 0x03 -#define PALMAS_SMPS6_TSTEP_TSTEP_SHIFT 0 +#define PALMAS_SMPS6_TSTEP_TSTEP_SHIFT 0x00 /* Bit definitions for SMPS6_FORCE */ #define PALMAS_SMPS6_FORCE_CMD 0x80 -#define PALMAS_SMPS6_FORCE_CMD_SHIFT 7 -#define PALMAS_SMPS6_FORCE_VSEL_MASK 0x7f -#define PALMAS_SMPS6_FORCE_VSEL_SHIFT 0 +#define PALMAS_SMPS6_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS6_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS6_FORCE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS6_VOLTAGE */ #define PALMAS_SMPS6_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS6_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS6_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS6_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS6_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS6_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS6_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS7_CTRL */ #define PALMAS_SMPS7_CTRL_WR_S 0x80 -#define PALMAS_SMPS7_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS7_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS7_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS7_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS7_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS7_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS7_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS7_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS7_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS7_VOLTAGE */ #define PALMAS_SMPS7_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS7_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS7_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS7_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS7_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS7_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS7_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS8_CTRL */ #define PALMAS_SMPS8_CTRL_WR_S 0x80 -#define PALMAS_SMPS8_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS8_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN 0x40 -#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN_SHIFT 6 +#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 #define PALMAS_SMPS8_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS8_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS8_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS8_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS8_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS8_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS8_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_SHIFT 0 
+#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS8_TSTEP */ #define PALMAS_SMPS8_TSTEP_TSTEP_MASK 0x03 -#define PALMAS_SMPS8_TSTEP_TSTEP_SHIFT 0 +#define PALMAS_SMPS8_TSTEP_TSTEP_SHIFT 0x00 /* Bit definitions for SMPS8_FORCE */ #define PALMAS_SMPS8_FORCE_CMD 0x80 -#define PALMAS_SMPS8_FORCE_CMD_SHIFT 7 -#define PALMAS_SMPS8_FORCE_VSEL_MASK 0x7f -#define PALMAS_SMPS8_FORCE_VSEL_SHIFT 0 +#define PALMAS_SMPS8_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS8_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS8_FORCE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS8_VOLTAGE */ #define PALMAS_SMPS8_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS8_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS8_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS8_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS8_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS8_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS8_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS9_CTRL */ #define PALMAS_SMPS9_CTRL_WR_S 0x80 -#define PALMAS_SMPS9_CTRL_WR_S_SHIFT 7 +#define PALMAS_SMPS9_CTRL_WR_S_SHIFT 0x07 #define PALMAS_SMPS9_CTRL_STATUS_MASK 0x30 -#define PALMAS_SMPS9_CTRL_STATUS_SHIFT 4 +#define PALMAS_SMPS9_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SMPS9_CTRL_MODE_SLEEP_MASK 0x0c -#define PALMAS_SMPS9_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SMPS9_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SMPS9_CTRL_MODE_ACTIVE_MASK 0x03 -#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS9_VOLTAGE */ #define PALMAS_SMPS9_VOLTAGE_RANGE 0x80 -#define PALMAS_SMPS9_VOLTAGE_RANGE_SHIFT 7 -#define PALMAS_SMPS9_VOLTAGE_VSEL_MASK 0x7f -#define PALMAS_SMPS9_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_SMPS9_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS9_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS9_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for SMPS10_CTRL */ #define PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK 0xf0 -#define PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT 4 -#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_MASK 0x0f -#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT 0x04 +#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_MASK 0x0F +#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SMPS10_STATUS */ -#define PALMAS_SMPS10_STATUS_STATUS_MASK 0x0f -#define PALMAS_SMPS10_STATUS_STATUS_SHIFT 0 +#define PALMAS_SMPS10_STATUS_STATUS_MASK 0x0F +#define PALMAS_SMPS10_STATUS_STATUS_SHIFT 0x00 /* Bit definitions for SMPS_CTRL */ #define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN 0x20 -#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN_SHIFT 5 +#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN_SHIFT 0x05 #define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN 0x10 -#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN_SHIFT 4 +#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN_SHIFT 0x04 #define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_MASK 0x0c -#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_SHIFT 2 +#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_SHIFT 0x02 #define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_MASK 0x03 -#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_SHIFT 0 +#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_SHIFT 0x00 /* Bit definitions for SMPS_PD_CTRL */ #define PALMAS_SMPS_PD_CTRL_SMPS9 0x40 -#define PALMAS_SMPS_PD_CTRL_SMPS9_SHIFT 6 +#define PALMAS_SMPS_PD_CTRL_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_PD_CTRL_SMPS8 0x20 -#define PALMAS_SMPS_PD_CTRL_SMPS8_SHIFT 5 +#define PALMAS_SMPS_PD_CTRL_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_PD_CTRL_SMPS7 0x10 -#define PALMAS_SMPS_PD_CTRL_SMPS7_SHIFT 4 +#define PALMAS_SMPS_PD_CTRL_SMPS7_SHIFT 0x04 #define PALMAS_SMPS_PD_CTRL_SMPS6 
0x08 -#define PALMAS_SMPS_PD_CTRL_SMPS6_SHIFT 3 +#define PALMAS_SMPS_PD_CTRL_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_PD_CTRL_SMPS45 0x04 -#define PALMAS_SMPS_PD_CTRL_SMPS45_SHIFT 2 +#define PALMAS_SMPS_PD_CTRL_SMPS45_SHIFT 0x02 #define PALMAS_SMPS_PD_CTRL_SMPS3 0x02 -#define PALMAS_SMPS_PD_CTRL_SMPS3_SHIFT 1 +#define PALMAS_SMPS_PD_CTRL_SMPS3_SHIFT 0x01 #define PALMAS_SMPS_PD_CTRL_SMPS12 0x01 -#define PALMAS_SMPS_PD_CTRL_SMPS12_SHIFT 0 +#define PALMAS_SMPS_PD_CTRL_SMPS12_SHIFT 0x00 /* Bit definitions for SMPS_THERMAL_EN */ #define PALMAS_SMPS_THERMAL_EN_SMPS9 0x40 -#define PALMAS_SMPS_THERMAL_EN_SMPS9_SHIFT 6 +#define PALMAS_SMPS_THERMAL_EN_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_THERMAL_EN_SMPS8 0x20 -#define PALMAS_SMPS_THERMAL_EN_SMPS8_SHIFT 5 +#define PALMAS_SMPS_THERMAL_EN_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_THERMAL_EN_SMPS6 0x08 -#define PALMAS_SMPS_THERMAL_EN_SMPS6_SHIFT 3 +#define PALMAS_SMPS_THERMAL_EN_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_THERMAL_EN_SMPS457 0x04 -#define PALMAS_SMPS_THERMAL_EN_SMPS457_SHIFT 2 +#define PALMAS_SMPS_THERMAL_EN_SMPS457_SHIFT 0x02 #define PALMAS_SMPS_THERMAL_EN_SMPS123 0x01 -#define PALMAS_SMPS_THERMAL_EN_SMPS123_SHIFT 0 +#define PALMAS_SMPS_THERMAL_EN_SMPS123_SHIFT 0x00 /* Bit definitions for SMPS_THERMAL_STATUS */ #define PALMAS_SMPS_THERMAL_STATUS_SMPS9 0x40 -#define PALMAS_SMPS_THERMAL_STATUS_SMPS9_SHIFT 6 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_THERMAL_STATUS_SMPS8 0x20 -#define PALMAS_SMPS_THERMAL_STATUS_SMPS8_SHIFT 5 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_THERMAL_STATUS_SMPS6 0x08 -#define PALMAS_SMPS_THERMAL_STATUS_SMPS6_SHIFT 3 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_THERMAL_STATUS_SMPS457 0x04 -#define PALMAS_SMPS_THERMAL_STATUS_SMPS457_SHIFT 2 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS457_SHIFT 0x02 #define PALMAS_SMPS_THERMAL_STATUS_SMPS123 0x01 -#define PALMAS_SMPS_THERMAL_STATUS_SMPS123_SHIFT 0 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS123_SHIFT 0x00 /* Bit definitions for SMPS_SHORT_STATUS */ #define PALMAS_SMPS_SHORT_STATUS_SMPS10 0x80 -#define PALMAS_SMPS_SHORT_STATUS_SMPS10_SHIFT 7 +#define PALMAS_SMPS_SHORT_STATUS_SMPS10_SHIFT 0x07 #define PALMAS_SMPS_SHORT_STATUS_SMPS9 0x40 -#define PALMAS_SMPS_SHORT_STATUS_SMPS9_SHIFT 6 +#define PALMAS_SMPS_SHORT_STATUS_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_SHORT_STATUS_SMPS8 0x20 -#define PALMAS_SMPS_SHORT_STATUS_SMPS8_SHIFT 5 +#define PALMAS_SMPS_SHORT_STATUS_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_SHORT_STATUS_SMPS7 0x10 -#define PALMAS_SMPS_SHORT_STATUS_SMPS7_SHIFT 4 +#define PALMAS_SMPS_SHORT_STATUS_SMPS7_SHIFT 0x04 #define PALMAS_SMPS_SHORT_STATUS_SMPS6 0x08 -#define PALMAS_SMPS_SHORT_STATUS_SMPS6_SHIFT 3 +#define PALMAS_SMPS_SHORT_STATUS_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_SHORT_STATUS_SMPS45 0x04 -#define PALMAS_SMPS_SHORT_STATUS_SMPS45_SHIFT 2 +#define PALMAS_SMPS_SHORT_STATUS_SMPS45_SHIFT 0x02 #define PALMAS_SMPS_SHORT_STATUS_SMPS3 0x02 -#define PALMAS_SMPS_SHORT_STATUS_SMPS3_SHIFT 1 +#define PALMAS_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x01 #define PALMAS_SMPS_SHORT_STATUS_SMPS12 0x01 -#define PALMAS_SMPS_SHORT_STATUS_SMPS12_SHIFT 0 +#define PALMAS_SMPS_SHORT_STATUS_SMPS12_SHIFT 0x00 /* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */ #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9 0x40 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9_SHIFT 6 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8 0x20 -#define 
PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8_SHIFT 5 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7 0x10 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7_SHIFT 4 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7_SHIFT 0x04 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6 0x08 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6_SHIFT 3 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45 0x04 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45_SHIFT 2 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45_SHIFT 0x02 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x02 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 1 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x01 #define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12 0x01 -#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12_SHIFT 0 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12_SHIFT 0x00 /* Bit definitions for SMPS_POWERGOOD_MASK1 */ #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10 0x80 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10_SHIFT 7 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10_SHIFT 0x07 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9 0x40 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9_SHIFT 6 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9_SHIFT 0x06 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8 0x20 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8_SHIFT 5 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8_SHIFT 0x05 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7 0x10 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7_SHIFT 4 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7_SHIFT 0x04 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6 0x08 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6_SHIFT 3 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6_SHIFT 0x03 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45 0x04 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45_SHIFT 2 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45_SHIFT 0x02 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3 0x02 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 1 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x01 #define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12 0x01 -#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12_SHIFT 0 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12_SHIFT 0x00 /* Bit definitions for SMPS_POWERGOOD_MASK2 */ #define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80 -#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 7 +#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07 #define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7 0x04 -#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7_SHIFT 2 +#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7_SHIFT 0x02 #define PALMAS_SMPS_POWERGOOD_MASK2_VBUS 0x02 -#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS_SHIFT 1 +#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS_SHIFT 0x01 #define PALMAS_SMPS_POWERGOOD_MASK2_ACOK 0x01 -#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK_SHIFT 0 +#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK_SHIFT 0x00 /* Registers for function LDO */ -#define PALMAS_LDO1_CTRL 0x0 -#define PALMAS_LDO1_VOLTAGE 0x1 -#define PALMAS_LDO2_CTRL 0x2 -#define PALMAS_LDO2_VOLTAGE 0x3 -#define PALMAS_LDO3_CTRL 0x4 -#define PALMAS_LDO3_VOLTAGE 0x5 -#define PALMAS_LDO4_CTRL 0x6 -#define PALMAS_LDO4_VOLTAGE 0x7 -#define PALMAS_LDO5_CTRL 0x8 -#define PALMAS_LDO5_VOLTAGE 0x9 -#define PALMAS_LDO6_CTRL 0xA -#define PALMAS_LDO6_VOLTAGE 0xB -#define PALMAS_LDO7_CTRL 0xC -#define PALMAS_LDO7_VOLTAGE 0xD -#define PALMAS_LDO8_CTRL 0xE -#define PALMAS_LDO8_VOLTAGE 0xF +#define 
PALMAS_LDO1_CTRL 0x00 +#define PALMAS_LDO1_VOLTAGE 0x01 +#define PALMAS_LDO2_CTRL 0x02 +#define PALMAS_LDO2_VOLTAGE 0x03 +#define PALMAS_LDO3_CTRL 0x04 +#define PALMAS_LDO3_VOLTAGE 0x05 +#define PALMAS_LDO4_CTRL 0x06 +#define PALMAS_LDO4_VOLTAGE 0x07 +#define PALMAS_LDO5_CTRL 0x08 +#define PALMAS_LDO5_VOLTAGE 0x09 +#define PALMAS_LDO6_CTRL 0x0A +#define PALMAS_LDO6_VOLTAGE 0x0B +#define PALMAS_LDO7_CTRL 0x0C +#define PALMAS_LDO7_VOLTAGE 0x0D +#define PALMAS_LDO8_CTRL 0x0E +#define PALMAS_LDO8_VOLTAGE 0x0F #define PALMAS_LDO9_CTRL 0x10 #define PALMAS_LDO9_VOLTAGE 0x11 #define PALMAS_LDOLN_CTRL 0x12 @@ -1055,236 +1055,236 @@ enum usb_irq_events { /* Bit definitions for LDO1_CTRL */ #define PALMAS_LDO1_CTRL_WR_S 0x80 -#define PALMAS_LDO1_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO1_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO1_CTRL_STATUS 0x10 -#define PALMAS_LDO1_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO1_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO1_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO1_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO1_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO1_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO1_VOLTAGE */ -#define PALMAS_LDO1_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO1_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO1_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO1_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO2_CTRL */ #define PALMAS_LDO2_CTRL_WR_S 0x80 -#define PALMAS_LDO2_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO2_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO2_CTRL_STATUS 0x10 -#define PALMAS_LDO2_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO2_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO2_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO2_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO2_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO2_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO2_VOLTAGE */ -#define PALMAS_LDO2_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO2_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO2_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO2_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO3_CTRL */ #define PALMAS_LDO3_CTRL_WR_S 0x80 -#define PALMAS_LDO3_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO3_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO3_CTRL_STATUS 0x10 -#define PALMAS_LDO3_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO3_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO3_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO3_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO3_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO3_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO3_VOLTAGE */ -#define PALMAS_LDO3_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO3_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO3_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO3_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO4_CTRL */ #define PALMAS_LDO4_CTRL_WR_S 0x80 -#define PALMAS_LDO4_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO4_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO4_CTRL_STATUS 0x10 -#define PALMAS_LDO4_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO4_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO4_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO4_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO4_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO4_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO4_VOLTAGE */ -#define PALMAS_LDO4_VOLTAGE_VSEL_MASK 
0x3f -#define PALMAS_LDO4_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO4_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO4_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO5_CTRL */ #define PALMAS_LDO5_CTRL_WR_S 0x80 -#define PALMAS_LDO5_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO5_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO5_CTRL_STATUS 0x10 -#define PALMAS_LDO5_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO5_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO5_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO5_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO5_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO5_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO5_VOLTAGE */ -#define PALMAS_LDO5_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO5_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO5_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO5_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO6_CTRL */ #define PALMAS_LDO6_CTRL_WR_S 0x80 -#define PALMAS_LDO6_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO6_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO6_CTRL_LDO_VIB_EN 0x40 -#define PALMAS_LDO6_CTRL_LDO_VIB_EN_SHIFT 6 +#define PALMAS_LDO6_CTRL_LDO_VIB_EN_SHIFT 0x06 #define PALMAS_LDO6_CTRL_STATUS 0x10 -#define PALMAS_LDO6_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO6_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO6_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO6_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO6_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO6_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO6_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO6_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO6_VOLTAGE */ -#define PALMAS_LDO6_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO6_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO6_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO6_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO7_CTRL */ #define PALMAS_LDO7_CTRL_WR_S 0x80 -#define PALMAS_LDO7_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO7_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO7_CTRL_STATUS 0x10 -#define PALMAS_LDO7_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO7_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO7_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO7_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO7_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO7_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO7_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO7_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO7_VOLTAGE */ -#define PALMAS_LDO7_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO7_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO7_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO7_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO8_CTRL */ #define PALMAS_LDO8_CTRL_WR_S 0x80 -#define PALMAS_LDO8_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO8_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO8_CTRL_LDO_TRACKING_EN 0x40 -#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN_SHIFT 6 +#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN_SHIFT 0x06 #define PALMAS_LDO8_CTRL_STATUS 0x10 -#define PALMAS_LDO8_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO8_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO8_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO8_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO8_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO8_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO8_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO8_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO8_VOLTAGE */ -#define PALMAS_LDO8_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO8_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO8_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO8_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO9_CTRL */ #define PALMAS_LDO9_CTRL_WR_S 0x80 -#define 
PALMAS_LDO9_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDO9_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDO9_CTRL_LDO_BYPASS_EN 0x40 -#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN_SHIFT 6 +#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN_SHIFT 0x06 #define PALMAS_LDO9_CTRL_STATUS 0x10 -#define PALMAS_LDO9_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDO9_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDO9_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDO9_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDO9_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDO9_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDO9_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDO9_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDO9_VOLTAGE */ -#define PALMAS_LDO9_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDO9_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDO9_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO9_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDOLN_CTRL */ #define PALMAS_LDOLN_CTRL_WR_S 0x80 -#define PALMAS_LDOLN_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDOLN_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDOLN_CTRL_STATUS 0x10 -#define PALMAS_LDOLN_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDOLN_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDOLN_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDOLN_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDOLN_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDOLN_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDOLN_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDOLN_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDOLN_VOLTAGE */ -#define PALMAS_LDOLN_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDOLN_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDOLN_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDOLN_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDOUSB_CTRL */ #define PALMAS_LDOUSB_CTRL_WR_S 0x80 -#define PALMAS_LDOUSB_CTRL_WR_S_SHIFT 7 +#define PALMAS_LDOUSB_CTRL_WR_S_SHIFT 0x07 #define PALMAS_LDOUSB_CTRL_STATUS 0x10 -#define PALMAS_LDOUSB_CTRL_STATUS_SHIFT 4 +#define PALMAS_LDOUSB_CTRL_STATUS_SHIFT 0x04 #define PALMAS_LDOUSB_CTRL_MODE_SLEEP 0x04 -#define PALMAS_LDOUSB_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_LDOUSB_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_LDOUSB_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for LDOUSB_VOLTAGE */ -#define PALMAS_LDOUSB_VOLTAGE_VSEL_MASK 0x3f -#define PALMAS_LDOUSB_VOLTAGE_VSEL_SHIFT 0 +#define PALMAS_LDOUSB_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDOUSB_VOLTAGE_VSEL_SHIFT 0x00 /* Bit definitions for LDO_CTRL */ #define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS 0x01 -#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS_SHIFT 0 +#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS_SHIFT 0x00 /* Bit definitions for LDO_PD_CTRL1 */ #define PALMAS_LDO_PD_CTRL1_LDO8 0x80 -#define PALMAS_LDO_PD_CTRL1_LDO8_SHIFT 7 +#define PALMAS_LDO_PD_CTRL1_LDO8_SHIFT 0x07 #define PALMAS_LDO_PD_CTRL1_LDO7 0x40 -#define PALMAS_LDO_PD_CTRL1_LDO7_SHIFT 6 +#define PALMAS_LDO_PD_CTRL1_LDO7_SHIFT 0x06 #define PALMAS_LDO_PD_CTRL1_LDO6 0x20 -#define PALMAS_LDO_PD_CTRL1_LDO6_SHIFT 5 +#define PALMAS_LDO_PD_CTRL1_LDO6_SHIFT 0x05 #define PALMAS_LDO_PD_CTRL1_LDO5 0x10 -#define PALMAS_LDO_PD_CTRL1_LDO5_SHIFT 4 +#define PALMAS_LDO_PD_CTRL1_LDO5_SHIFT 0x04 #define PALMAS_LDO_PD_CTRL1_LDO4 0x08 -#define PALMAS_LDO_PD_CTRL1_LDO4_SHIFT 3 +#define PALMAS_LDO_PD_CTRL1_LDO4_SHIFT 0x03 #define PALMAS_LDO_PD_CTRL1_LDO3 0x04 -#define PALMAS_LDO_PD_CTRL1_LDO3_SHIFT 2 +#define PALMAS_LDO_PD_CTRL1_LDO3_SHIFT 0x02 #define PALMAS_LDO_PD_CTRL1_LDO2 0x02 -#define PALMAS_LDO_PD_CTRL1_LDO2_SHIFT 1 +#define PALMAS_LDO_PD_CTRL1_LDO2_SHIFT 0x01 #define PALMAS_LDO_PD_CTRL1_LDO1 0x01 -#define 
PALMAS_LDO_PD_CTRL1_LDO1_SHIFT 0 +#define PALMAS_LDO_PD_CTRL1_LDO1_SHIFT 0x00 /* Bit definitions for LDO_PD_CTRL2 */ #define PALMAS_LDO_PD_CTRL2_LDOUSB 0x04 -#define PALMAS_LDO_PD_CTRL2_LDOUSB_SHIFT 2 +#define PALMAS_LDO_PD_CTRL2_LDOUSB_SHIFT 0x02 #define PALMAS_LDO_PD_CTRL2_LDOLN 0x02 -#define PALMAS_LDO_PD_CTRL2_LDOLN_SHIFT 1 +#define PALMAS_LDO_PD_CTRL2_LDOLN_SHIFT 0x01 #define PALMAS_LDO_PD_CTRL2_LDO9 0x01 -#define PALMAS_LDO_PD_CTRL2_LDO9_SHIFT 0 +#define PALMAS_LDO_PD_CTRL2_LDO9_SHIFT 0x00 /* Bit definitions for LDO_SHORT_STATUS1 */ #define PALMAS_LDO_SHORT_STATUS1_LDO8 0x80 -#define PALMAS_LDO_SHORT_STATUS1_LDO8_SHIFT 7 +#define PALMAS_LDO_SHORT_STATUS1_LDO8_SHIFT 0x07 #define PALMAS_LDO_SHORT_STATUS1_LDO7 0x40 -#define PALMAS_LDO_SHORT_STATUS1_LDO7_SHIFT 6 +#define PALMAS_LDO_SHORT_STATUS1_LDO7_SHIFT 0x06 #define PALMAS_LDO_SHORT_STATUS1_LDO6 0x20 -#define PALMAS_LDO_SHORT_STATUS1_LDO6_SHIFT 5 +#define PALMAS_LDO_SHORT_STATUS1_LDO6_SHIFT 0x05 #define PALMAS_LDO_SHORT_STATUS1_LDO5 0x10 -#define PALMAS_LDO_SHORT_STATUS1_LDO5_SHIFT 4 +#define PALMAS_LDO_SHORT_STATUS1_LDO5_SHIFT 0x04 #define PALMAS_LDO_SHORT_STATUS1_LDO4 0x08 -#define PALMAS_LDO_SHORT_STATUS1_LDO4_SHIFT 3 +#define PALMAS_LDO_SHORT_STATUS1_LDO4_SHIFT 0x03 #define PALMAS_LDO_SHORT_STATUS1_LDO3 0x04 -#define PALMAS_LDO_SHORT_STATUS1_LDO3_SHIFT 2 +#define PALMAS_LDO_SHORT_STATUS1_LDO3_SHIFT 0x02 #define PALMAS_LDO_SHORT_STATUS1_LDO2 0x02 -#define PALMAS_LDO_SHORT_STATUS1_LDO2_SHIFT 1 +#define PALMAS_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01 #define PALMAS_LDO_SHORT_STATUS1_LDO1 0x01 -#define PALMAS_LDO_SHORT_STATUS1_LDO1_SHIFT 0 +#define PALMAS_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00 /* Bit definitions for LDO_SHORT_STATUS2 */ #define PALMAS_LDO_SHORT_STATUS2_LDOVANA 0x08 -#define PALMAS_LDO_SHORT_STATUS2_LDOVANA_SHIFT 3 +#define PALMAS_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x03 #define PALMAS_LDO_SHORT_STATUS2_LDOUSB 0x04 -#define PALMAS_LDO_SHORT_STATUS2_LDOUSB_SHIFT 2 +#define PALMAS_LDO_SHORT_STATUS2_LDOUSB_SHIFT 0x02 #define PALMAS_LDO_SHORT_STATUS2_LDOLN 0x02 -#define PALMAS_LDO_SHORT_STATUS2_LDOLN_SHIFT 1 +#define PALMAS_LDO_SHORT_STATUS2_LDOLN_SHIFT 0x01 #define PALMAS_LDO_SHORT_STATUS2_LDO9 0x01 -#define PALMAS_LDO_SHORT_STATUS2_LDO9_SHIFT 0 +#define PALMAS_LDO_SHORT_STATUS2_LDO9_SHIFT 0x00 /* Registers for function PMU_CONTROL */ -#define PALMAS_DEV_CTRL 0x0 -#define PALMAS_POWER_CTRL 0x1 -#define PALMAS_VSYS_LO 0x2 -#define PALMAS_VSYS_MON 0x3 -#define PALMAS_VBAT_MON 0x4 -#define PALMAS_WATCHDOG 0x5 -#define PALMAS_BOOT_STATUS 0x6 -#define PALMAS_BATTERY_BOUNCE 0x7 -#define PALMAS_BACKUP_BATTERY_CTRL 0x8 -#define PALMAS_LONG_PRESS_KEY 0x9 -#define PALMAS_OSC_THERM_CTRL 0xA -#define PALMAS_BATDEBOUNCING 0xB -#define PALMAS_SWOFF_HWRST 0xF +#define PALMAS_DEV_CTRL 0x00 +#define PALMAS_POWER_CTRL 0x01 +#define PALMAS_VSYS_LO 0x02 +#define PALMAS_VSYS_MON 0x03 +#define PALMAS_VBAT_MON 0x04 +#define PALMAS_WATCHDOG 0x05 +#define PALMAS_BOOT_STATUS 0x06 +#define PALMAS_BATTERY_BOUNCE 0x07 +#define PALMAS_BACKUP_BATTERY_CTRL 0x08 +#define PALMAS_LONG_PRESS_KEY 0x09 +#define PALMAS_OSC_THERM_CTRL 0x0A +#define PALMAS_BATDEBOUNCING 0x0B +#define PALMAS_SWOFF_HWRST 0x0F #define PALMAS_SWOFF_COLDRST 0x10 #define PALMAS_SWOFF_STATUS 0x11 #define PALMAS_PMU_CONFIG 0x12 @@ -1296,668 +1296,668 @@ enum usb_irq_events { /* Bit definitions for DEV_CTRL */ #define PALMAS_DEV_CTRL_DEV_STATUS_MASK 0x0c -#define PALMAS_DEV_CTRL_DEV_STATUS_SHIFT 2 +#define PALMAS_DEV_CTRL_DEV_STATUS_SHIFT 0x02 #define PALMAS_DEV_CTRL_SW_RST 0x02 -#define 
PALMAS_DEV_CTRL_SW_RST_SHIFT 1 +#define PALMAS_DEV_CTRL_SW_RST_SHIFT 0x01 #define PALMAS_DEV_CTRL_DEV_ON 0x01 -#define PALMAS_DEV_CTRL_DEV_ON_SHIFT 0 +#define PALMAS_DEV_CTRL_DEV_ON_SHIFT 0x00 /* Bit definitions for POWER_CTRL */ #define PALMAS_POWER_CTRL_ENABLE2_MASK 0x04 -#define PALMAS_POWER_CTRL_ENABLE2_MASK_SHIFT 2 +#define PALMAS_POWER_CTRL_ENABLE2_MASK_SHIFT 0x02 #define PALMAS_POWER_CTRL_ENABLE1_MASK 0x02 -#define PALMAS_POWER_CTRL_ENABLE1_MASK_SHIFT 1 +#define PALMAS_POWER_CTRL_ENABLE1_MASK_SHIFT 0x01 #define PALMAS_POWER_CTRL_NSLEEP_MASK 0x01 -#define PALMAS_POWER_CTRL_NSLEEP_MASK_SHIFT 0 +#define PALMAS_POWER_CTRL_NSLEEP_MASK_SHIFT 0x00 /* Bit definitions for VSYS_LO */ -#define PALMAS_VSYS_LO_THRESHOLD_MASK 0x1f -#define PALMAS_VSYS_LO_THRESHOLD_SHIFT 0 +#define PALMAS_VSYS_LO_THRESHOLD_MASK 0x1F +#define PALMAS_VSYS_LO_THRESHOLD_SHIFT 0x00 /* Bit definitions for VSYS_MON */ #define PALMAS_VSYS_MON_ENABLE 0x80 -#define PALMAS_VSYS_MON_ENABLE_SHIFT 7 -#define PALMAS_VSYS_MON_THRESHOLD_MASK 0x3f -#define PALMAS_VSYS_MON_THRESHOLD_SHIFT 0 +#define PALMAS_VSYS_MON_ENABLE_SHIFT 0x07 +#define PALMAS_VSYS_MON_THRESHOLD_MASK 0x3F +#define PALMAS_VSYS_MON_THRESHOLD_SHIFT 0x00 /* Bit definitions for VBAT_MON */ #define PALMAS_VBAT_MON_ENABLE 0x80 -#define PALMAS_VBAT_MON_ENABLE_SHIFT 7 -#define PALMAS_VBAT_MON_THRESHOLD_MASK 0x3f -#define PALMAS_VBAT_MON_THRESHOLD_SHIFT 0 +#define PALMAS_VBAT_MON_ENABLE_SHIFT 0x07 +#define PALMAS_VBAT_MON_THRESHOLD_MASK 0x3F +#define PALMAS_VBAT_MON_THRESHOLD_SHIFT 0x00 /* Bit definitions for WATCHDOG */ #define PALMAS_WATCHDOG_LOCK 0x20 -#define PALMAS_WATCHDOG_LOCK_SHIFT 5 +#define PALMAS_WATCHDOG_LOCK_SHIFT 0x05 #define PALMAS_WATCHDOG_ENABLE 0x10 -#define PALMAS_WATCHDOG_ENABLE_SHIFT 4 +#define PALMAS_WATCHDOG_ENABLE_SHIFT 0x04 #define PALMAS_WATCHDOG_MODE 0x08 -#define PALMAS_WATCHDOG_MODE_SHIFT 3 +#define PALMAS_WATCHDOG_MODE_SHIFT 0x03 #define PALMAS_WATCHDOG_TIMER_MASK 0x07 -#define PALMAS_WATCHDOG_TIMER_SHIFT 0 +#define PALMAS_WATCHDOG_TIMER_SHIFT 0x00 /* Bit definitions for BOOT_STATUS */ #define PALMAS_BOOT_STATUS_BOOT1 0x02 -#define PALMAS_BOOT_STATUS_BOOT1_SHIFT 1 +#define PALMAS_BOOT_STATUS_BOOT1_SHIFT 0x01 #define PALMAS_BOOT_STATUS_BOOT0 0x01 -#define PALMAS_BOOT_STATUS_BOOT0_SHIFT 0 +#define PALMAS_BOOT_STATUS_BOOT0_SHIFT 0x00 /* Bit definitions for BATTERY_BOUNCE */ -#define PALMAS_BATTERY_BOUNCE_BB_DELAY_MASK 0x3f -#define PALMAS_BATTERY_BOUNCE_BB_DELAY_SHIFT 0 +#define PALMAS_BATTERY_BOUNCE_BB_DELAY_MASK 0x3F +#define PALMAS_BATTERY_BOUNCE_BB_DELAY_SHIFT 0x00 /* Bit definitions for BACKUP_BATTERY_CTRL */ #define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15 0x80 -#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15_SHIFT 7 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15_SHIFT 0x07 #define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP 0x40 -#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP_SHIFT 6 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP_SHIFT 0x06 #define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF 0x20 -#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF_SHIFT 5 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF_SHIFT 0x05 #define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN 0x10 -#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN_SHIFT 4 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN_SHIFT 0x04 #define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG 0x08 -#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG_SHIFT 3 +#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG_SHIFT 0x03 #define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_MASK 0x06 -#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_SHIFT 1 
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_SHIFT 0x01 #define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN 0x01 -#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN_SHIFT 0 +#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN_SHIFT 0x00 /* Bit definitions for LONG_PRESS_KEY */ #define PALMAS_LONG_PRESS_KEY_LPK_LOCK 0x80 -#define PALMAS_LONG_PRESS_KEY_LPK_LOCK_SHIFT 7 +#define PALMAS_LONG_PRESS_KEY_LPK_LOCK_SHIFT 0x07 #define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR 0x10 -#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR_SHIFT 4 +#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR_SHIFT 0x04 #define PALMAS_LONG_PRESS_KEY_LPK_TIME_MASK 0x0c -#define PALMAS_LONG_PRESS_KEY_LPK_TIME_SHIFT 2 +#define PALMAS_LONG_PRESS_KEY_LPK_TIME_SHIFT 0x02 #define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_MASK 0x03 -#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_SHIFT 0 +#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_SHIFT 0x00 /* Bit definitions for OSC_THERM_CTRL */ #define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP 0x80 -#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP_SHIFT 7 +#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP_SHIFT 0x07 #define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP 0x40 -#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP_SHIFT 6 +#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP_SHIFT 0x06 #define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP 0x20 -#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP_SHIFT 5 +#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP_SHIFT 0x05 #define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP 0x10 -#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP_SHIFT 4 +#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP_SHIFT 0x04 #define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_MASK 0x0c -#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_SHIFT 2 +#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_SHIFT 0x02 #define PALMAS_OSC_THERM_CTRL_OSC_BYPASS 0x02 -#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS_SHIFT 1 +#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS_SHIFT 0x01 #define PALMAS_OSC_THERM_CTRL_OSC_HPMODE 0x01 -#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE_SHIFT 0 +#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE_SHIFT 0x00 /* Bit definitions for BATDEBOUNCING */ #define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS 0x80 -#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS_SHIFT 7 +#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS_SHIFT 0x07 #define PALMAS_BATDEBOUNCING_BINS_DEB_MASK 0x78 -#define PALMAS_BATDEBOUNCING_BINS_DEB_SHIFT 3 +#define PALMAS_BATDEBOUNCING_BINS_DEB_SHIFT 0x03 #define PALMAS_BATDEBOUNCING_BEXT_DEB_MASK 0x07 -#define PALMAS_BATDEBOUNCING_BEXT_DEB_SHIFT 0 +#define PALMAS_BATDEBOUNCING_BEXT_DEB_SHIFT 0x00 /* Bit definitions for SWOFF_HWRST */ #define PALMAS_SWOFF_HWRST_PWRON_LPK 0x80 -#define PALMAS_SWOFF_HWRST_PWRON_LPK_SHIFT 7 +#define PALMAS_SWOFF_HWRST_PWRON_LPK_SHIFT 0x07 #define PALMAS_SWOFF_HWRST_PWRDOWN 0x40 -#define PALMAS_SWOFF_HWRST_PWRDOWN_SHIFT 6 +#define PALMAS_SWOFF_HWRST_PWRDOWN_SHIFT 0x06 #define PALMAS_SWOFF_HWRST_WTD 0x20 -#define PALMAS_SWOFF_HWRST_WTD_SHIFT 5 +#define PALMAS_SWOFF_HWRST_WTD_SHIFT 0x05 #define PALMAS_SWOFF_HWRST_TSHUT 0x10 -#define PALMAS_SWOFF_HWRST_TSHUT_SHIFT 4 +#define PALMAS_SWOFF_HWRST_TSHUT_SHIFT 0x04 #define PALMAS_SWOFF_HWRST_RESET_IN 0x08 -#define PALMAS_SWOFF_HWRST_RESET_IN_SHIFT 3 +#define PALMAS_SWOFF_HWRST_RESET_IN_SHIFT 0x03 #define PALMAS_SWOFF_HWRST_SW_RST 0x04 -#define PALMAS_SWOFF_HWRST_SW_RST_SHIFT 2 +#define PALMAS_SWOFF_HWRST_SW_RST_SHIFT 0x02 #define PALMAS_SWOFF_HWRST_VSYS_LO 0x02 -#define PALMAS_SWOFF_HWRST_VSYS_LO_SHIFT 1 +#define PALMAS_SWOFF_HWRST_VSYS_LO_SHIFT 0x01 #define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN 0x01 -#define 
PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN_SHIFT 0 +#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN_SHIFT 0x00 /* Bit definitions for SWOFF_COLDRST */ #define PALMAS_SWOFF_COLDRST_PWRON_LPK 0x80 -#define PALMAS_SWOFF_COLDRST_PWRON_LPK_SHIFT 7 +#define PALMAS_SWOFF_COLDRST_PWRON_LPK_SHIFT 0x07 #define PALMAS_SWOFF_COLDRST_PWRDOWN 0x40 -#define PALMAS_SWOFF_COLDRST_PWRDOWN_SHIFT 6 +#define PALMAS_SWOFF_COLDRST_PWRDOWN_SHIFT 0x06 #define PALMAS_SWOFF_COLDRST_WTD 0x20 -#define PALMAS_SWOFF_COLDRST_WTD_SHIFT 5 +#define PALMAS_SWOFF_COLDRST_WTD_SHIFT 0x05 #define PALMAS_SWOFF_COLDRST_TSHUT 0x10 -#define PALMAS_SWOFF_COLDRST_TSHUT_SHIFT 4 +#define PALMAS_SWOFF_COLDRST_TSHUT_SHIFT 0x04 #define PALMAS_SWOFF_COLDRST_RESET_IN 0x08 -#define PALMAS_SWOFF_COLDRST_RESET_IN_SHIFT 3 +#define PALMAS_SWOFF_COLDRST_RESET_IN_SHIFT 0x03 #define PALMAS_SWOFF_COLDRST_SW_RST 0x04 -#define PALMAS_SWOFF_COLDRST_SW_RST_SHIFT 2 +#define PALMAS_SWOFF_COLDRST_SW_RST_SHIFT 0x02 #define PALMAS_SWOFF_COLDRST_VSYS_LO 0x02 -#define PALMAS_SWOFF_COLDRST_VSYS_LO_SHIFT 1 +#define PALMAS_SWOFF_COLDRST_VSYS_LO_SHIFT 0x01 #define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN 0x01 -#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN_SHIFT 0 +#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN_SHIFT 0x00 /* Bit definitions for SWOFF_STATUS */ #define PALMAS_SWOFF_STATUS_PWRON_LPK 0x80 -#define PALMAS_SWOFF_STATUS_PWRON_LPK_SHIFT 7 +#define PALMAS_SWOFF_STATUS_PWRON_LPK_SHIFT 0x07 #define PALMAS_SWOFF_STATUS_PWRDOWN 0x40 -#define PALMAS_SWOFF_STATUS_PWRDOWN_SHIFT 6 +#define PALMAS_SWOFF_STATUS_PWRDOWN_SHIFT 0x06 #define PALMAS_SWOFF_STATUS_WTD 0x20 -#define PALMAS_SWOFF_STATUS_WTD_SHIFT 5 +#define PALMAS_SWOFF_STATUS_WTD_SHIFT 0x05 #define PALMAS_SWOFF_STATUS_TSHUT 0x10 -#define PALMAS_SWOFF_STATUS_TSHUT_SHIFT 4 +#define PALMAS_SWOFF_STATUS_TSHUT_SHIFT 0x04 #define PALMAS_SWOFF_STATUS_RESET_IN 0x08 -#define PALMAS_SWOFF_STATUS_RESET_IN_SHIFT 3 +#define PALMAS_SWOFF_STATUS_RESET_IN_SHIFT 0x03 #define PALMAS_SWOFF_STATUS_SW_RST 0x04 -#define PALMAS_SWOFF_STATUS_SW_RST_SHIFT 2 +#define PALMAS_SWOFF_STATUS_SW_RST_SHIFT 0x02 #define PALMAS_SWOFF_STATUS_VSYS_LO 0x02 -#define PALMAS_SWOFF_STATUS_VSYS_LO_SHIFT 1 +#define PALMAS_SWOFF_STATUS_VSYS_LO_SHIFT 0x01 #define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN 0x01 -#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN_SHIFT 0 +#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN_SHIFT 0x00 /* Bit definitions for PMU_CONFIG */ #define PALMAS_PMU_CONFIG_MULTI_CELL_EN 0x40 -#define PALMAS_PMU_CONFIG_MULTI_CELL_EN_SHIFT 6 +#define PALMAS_PMU_CONFIG_MULTI_CELL_EN_SHIFT 0x06 #define PALMAS_PMU_CONFIG_SPARE_MASK 0x30 -#define PALMAS_PMU_CONFIG_SPARE_SHIFT 4 +#define PALMAS_PMU_CONFIG_SPARE_SHIFT 0x04 #define PALMAS_PMU_CONFIG_SWOFF_DLY_MASK 0x0c -#define PALMAS_PMU_CONFIG_SWOFF_DLY_SHIFT 2 +#define PALMAS_PMU_CONFIG_SWOFF_DLY_SHIFT 0x02 #define PALMAS_PMU_CONFIG_GATE_RESET_OUT 0x02 -#define PALMAS_PMU_CONFIG_GATE_RESET_OUT_SHIFT 1 +#define PALMAS_PMU_CONFIG_GATE_RESET_OUT_SHIFT 0x01 #define PALMAS_PMU_CONFIG_AUTODEVON 0x01 -#define PALMAS_PMU_CONFIG_AUTODEVON_SHIFT 0 +#define PALMAS_PMU_CONFIG_AUTODEVON_SHIFT 0x00 /* Bit definitions for SPARE */ #define PALMAS_SPARE_SPARE_MASK 0xf8 -#define PALMAS_SPARE_SPARE_SHIFT 3 +#define PALMAS_SPARE_SPARE_SHIFT 0x03 #define PALMAS_SPARE_REGEN3_OD 0x04 -#define PALMAS_SPARE_REGEN3_OD_SHIFT 2 +#define PALMAS_SPARE_REGEN3_OD_SHIFT 0x02 #define PALMAS_SPARE_REGEN2_OD 0x02 -#define PALMAS_SPARE_REGEN2_OD_SHIFT 1 +#define PALMAS_SPARE_REGEN2_OD_SHIFT 0x01 #define PALMAS_SPARE_REGEN1_OD 0x01 -#define PALMAS_SPARE_REGEN1_OD_SHIFT 0 
+#define PALMAS_SPARE_REGEN1_OD_SHIFT 0x00 /* Bit definitions for PMU_SECONDARY_INT */ #define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC 0x80 -#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC_SHIFT 7 +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC_SHIFT 0x07 #define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC 0x40 -#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC_SHIFT 6 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC_SHIFT 0x06 #define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC 0x20 -#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC_SHIFT 5 +#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC_SHIFT 0x05 #define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC 0x10 -#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC_SHIFT 4 +#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC_SHIFT 0x04 #define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK 0x08 -#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK_SHIFT 3 +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK_SHIFT 0x03 #define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK 0x04 -#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK_SHIFT 2 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK_SHIFT 0x02 #define PALMAS_PMU_SECONDARY_INT_BB_MASK 0x02 -#define PALMAS_PMU_SECONDARY_INT_BB_MASK_SHIFT 1 +#define PALMAS_PMU_SECONDARY_INT_BB_MASK_SHIFT 0x01 #define PALMAS_PMU_SECONDARY_INT_FBI_MASK 0x01 -#define PALMAS_PMU_SECONDARY_INT_FBI_MASK_SHIFT 0 +#define PALMAS_PMU_SECONDARY_INT_FBI_MASK_SHIFT 0x00 /* Bit definitions for SW_REVISION */ -#define PALMAS_SW_REVISION_SW_REVISION_MASK 0xff -#define PALMAS_SW_REVISION_SW_REVISION_SHIFT 0 +#define PALMAS_SW_REVISION_SW_REVISION_MASK 0xFF +#define PALMAS_SW_REVISION_SW_REVISION_SHIFT 0x00 /* Bit definitions for EXT_CHRG_CTRL */ #define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS 0x80 -#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS_SHIFT 7 +#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS_SHIFT 0x07 #define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS 0x40 -#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS_SHIFT 6 +#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS_SHIFT 0x06 #define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY 0x08 -#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY_SHIFT 3 +#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY_SHIFT 0x03 #define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N 0x04 -#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N_SHIFT 2 +#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N_SHIFT 0x02 #define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN 0x02 -#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN_SHIFT 1 +#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN_SHIFT 0x01 #define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN 0x01 -#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN_SHIFT 0 +#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN_SHIFT 0x00 /* Bit definitions for PMU_SECONDARY_INT2 */ #define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC 0x20 -#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC_SHIFT 5 +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC_SHIFT 0x05 #define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC 0x10 -#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC_SHIFT 4 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC_SHIFT 0x04 #define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK 0x02 -#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK_SHIFT 1 +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK_SHIFT 0x01 #define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK 0x01 -#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK_SHIFT 0 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK_SHIFT 0x00 /* Registers for function RESOURCE */ -#define PALMAS_CLK32KG_CTRL 0x0 -#define PALMAS_CLK32KGAUDIO_CTRL 0x1 -#define PALMAS_REGEN1_CTRL 0x2 -#define PALMAS_REGEN2_CTRL 0x3 -#define PALMAS_SYSEN1_CTRL 
0x4 -#define PALMAS_SYSEN2_CTRL 0x5 -#define PALMAS_NSLEEP_RES_ASSIGN 0x6 -#define PALMAS_NSLEEP_SMPS_ASSIGN 0x7 -#define PALMAS_NSLEEP_LDO_ASSIGN1 0x8 -#define PALMAS_NSLEEP_LDO_ASSIGN2 0x9 -#define PALMAS_ENABLE1_RES_ASSIGN 0xA -#define PALMAS_ENABLE1_SMPS_ASSIGN 0xB -#define PALMAS_ENABLE1_LDO_ASSIGN1 0xC -#define PALMAS_ENABLE1_LDO_ASSIGN2 0xD -#define PALMAS_ENABLE2_RES_ASSIGN 0xE -#define PALMAS_ENABLE2_SMPS_ASSIGN 0xF +#define PALMAS_CLK32KG_CTRL 0x00 +#define PALMAS_CLK32KGAUDIO_CTRL 0x01 +#define PALMAS_REGEN1_CTRL 0x02 +#define PALMAS_REGEN2_CTRL 0x03 +#define PALMAS_SYSEN1_CTRL 0x04 +#define PALMAS_SYSEN2_CTRL 0x05 +#define PALMAS_NSLEEP_RES_ASSIGN 0x06 +#define PALMAS_NSLEEP_SMPS_ASSIGN 0x07 +#define PALMAS_NSLEEP_LDO_ASSIGN1 0x08 +#define PALMAS_NSLEEP_LDO_ASSIGN2 0x09 +#define PALMAS_ENABLE1_RES_ASSIGN 0x0A +#define PALMAS_ENABLE1_SMPS_ASSIGN 0x0B +#define PALMAS_ENABLE1_LDO_ASSIGN1 0x0C +#define PALMAS_ENABLE1_LDO_ASSIGN2 0x0D +#define PALMAS_ENABLE2_RES_ASSIGN 0x0E +#define PALMAS_ENABLE2_SMPS_ASSIGN 0x0F #define PALMAS_ENABLE2_LDO_ASSIGN1 0x10 #define PALMAS_ENABLE2_LDO_ASSIGN2 0x11 #define PALMAS_REGEN3_CTRL 0x12 /* Bit definitions for CLK32KG_CTRL */ #define PALMAS_CLK32KG_CTRL_STATUS 0x10 -#define PALMAS_CLK32KG_CTRL_STATUS_SHIFT 4 +#define PALMAS_CLK32KG_CTRL_STATUS_SHIFT 0x04 #define PALMAS_CLK32KG_CTRL_MODE_SLEEP 0x04 -#define PALMAS_CLK32KG_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_CLK32KG_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_CLK32KG_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for CLK32KGAUDIO_CTRL */ #define PALMAS_CLK32KGAUDIO_CTRL_STATUS 0x10 -#define PALMAS_CLK32KGAUDIO_CTRL_STATUS_SHIFT 4 +#define PALMAS_CLK32KGAUDIO_CTRL_STATUS_SHIFT 0x04 #define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3 0x08 -#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3_SHIFT 3 +#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3_SHIFT 0x03 #define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP 0x04 -#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for REGEN1_CTRL */ #define PALMAS_REGEN1_CTRL_STATUS 0x10 -#define PALMAS_REGEN1_CTRL_STATUS_SHIFT 4 +#define PALMAS_REGEN1_CTRL_STATUS_SHIFT 0x04 #define PALMAS_REGEN1_CTRL_MODE_SLEEP 0x04 -#define PALMAS_REGEN1_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_REGEN1_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for REGEN2_CTRL */ #define PALMAS_REGEN2_CTRL_STATUS 0x10 -#define PALMAS_REGEN2_CTRL_STATUS_SHIFT 4 +#define PALMAS_REGEN2_CTRL_STATUS_SHIFT 0x04 #define PALMAS_REGEN2_CTRL_MODE_SLEEP 0x04 -#define PALMAS_REGEN2_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_REGEN2_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SYSEN1_CTRL */ #define PALMAS_SYSEN1_CTRL_STATUS 0x10 -#define PALMAS_SYSEN1_CTRL_STATUS_SHIFT 4 +#define PALMAS_SYSEN1_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SYSEN1_CTRL_MODE_SLEEP 0x04 -#define PALMAS_SYSEN1_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SYSEN1_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SYSEN1_CTRL_MODE_ACTIVE 0x01 -#define 
PALMAS_SYSEN1_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for SYSEN2_CTRL */ #define PALMAS_SYSEN2_CTRL_STATUS 0x10 -#define PALMAS_SYSEN2_CTRL_STATUS_SHIFT 4 +#define PALMAS_SYSEN2_CTRL_STATUS_SHIFT 0x04 #define PALMAS_SYSEN2_CTRL_MODE_SLEEP 0x04 -#define PALMAS_SYSEN2_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_SYSEN2_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_SYSEN2_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Bit definitions for NSLEEP_RES_ASSIGN */ #define PALMAS_NSLEEP_RES_ASSIGN_REGEN3 0x40 -#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 6 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x06 #define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO 0x20 -#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO_SHIFT 5 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 #define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG 0x10 -#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG_SHIFT 4 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG_SHIFT 0x04 #define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2 0x08 -#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2_SHIFT 3 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2_SHIFT 0x03 #define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1 0x04 -#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1_SHIFT 2 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1_SHIFT 0x02 #define PALMAS_NSLEEP_RES_ASSIGN_REGEN2 0x02 -#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 1 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01 #define PALMAS_NSLEEP_RES_ASSIGN_REGEN1 0x01 -#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00 /* Bit definitions for NSLEEP_SMPS_ASSIGN */ #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10 0x80 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10_SHIFT 7 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10_SHIFT 0x07 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9 0x40 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9_SHIFT 6 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9_SHIFT 0x06 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8 0x20 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8_SHIFT 5 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8_SHIFT 0x05 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7 0x10 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7_SHIFT 4 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7_SHIFT 0x04 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6 0x08 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6_SHIFT 3 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6_SHIFT 0x03 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45 0x04 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45_SHIFT 2 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45_SHIFT 0x02 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3 0x02 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 1 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x01 #define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12 0x01 -#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12_SHIFT 0 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12_SHIFT 0x00 /* Bit definitions for NSLEEP_LDO_ASSIGN1 */ #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8 0x80 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8_SHIFT 7 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8_SHIFT 0x07 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7 0x40 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7_SHIFT 6 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7_SHIFT 0x06 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6 0x20 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6_SHIFT 5 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6_SHIFT 0x05 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5 0x10 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5_SHIFT 4 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5_SHIFT 0x04 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4 0x08 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 
3 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x03 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3 0x04 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3_SHIFT 2 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3_SHIFT 0x02 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2 0x02 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 1 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01 #define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1 0x01 -#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00 /* Bit definitions for NSLEEP_LDO_ASSIGN2 */ #define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB 0x04 -#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB_SHIFT 2 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 #define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN 0x02 -#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN_SHIFT 1 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN_SHIFT 0x01 #define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9 0x01 -#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9_SHIFT 0 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9_SHIFT 0x00 /* Bit definitions for ENABLE1_RES_ASSIGN */ #define PALMAS_ENABLE1_RES_ASSIGN_REGEN3 0x40 -#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 6 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x06 #define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO 0x20 -#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO_SHIFT 5 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 #define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG 0x10 -#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG_SHIFT 4 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG_SHIFT 0x04 #define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2 0x08 -#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2_SHIFT 3 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2_SHIFT 0x03 #define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1 0x04 -#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1_SHIFT 2 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1_SHIFT 0x02 #define PALMAS_ENABLE1_RES_ASSIGN_REGEN2 0x02 -#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 1 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01 #define PALMAS_ENABLE1_RES_ASSIGN_REGEN1 0x01 -#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00 /* Bit definitions for ENABLE1_SMPS_ASSIGN */ #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10 0x80 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10_SHIFT 7 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10_SHIFT 0x07 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9 0x40 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9_SHIFT 6 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9_SHIFT 0x06 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8 0x20 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8_SHIFT 5 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8_SHIFT 0x05 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7 0x10 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7_SHIFT 4 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7_SHIFT 0x04 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6 0x08 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6_SHIFT 3 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6_SHIFT 0x03 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45 0x04 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45_SHIFT 2 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45_SHIFT 0x02 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3 0x02 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 1 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x01 #define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12 0x01 -#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12_SHIFT 0 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12_SHIFT 0x00 /* Bit definitions for ENABLE1_LDO_ASSIGN1 */ #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8 0x80 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8_SHIFT 7 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8_SHIFT 0x07 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7 
0x40 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7_SHIFT 6 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7_SHIFT 0x06 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6 0x20 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6_SHIFT 5 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6_SHIFT 0x05 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5 0x10 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5_SHIFT 4 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5_SHIFT 0x04 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4 0x08 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 3 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x03 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3 0x04 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3_SHIFT 2 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3_SHIFT 0x02 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2 0x02 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 1 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01 #define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1 0x01 -#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00 /* Bit definitions for ENABLE1_LDO_ASSIGN2 */ #define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB 0x04 -#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB_SHIFT 2 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 #define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN 0x02 -#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN_SHIFT 1 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN_SHIFT 0x01 #define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9 0x01 -#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9_SHIFT 0 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9_SHIFT 0x00 /* Bit definitions for ENABLE2_RES_ASSIGN */ #define PALMAS_ENABLE2_RES_ASSIGN_REGEN3 0x40 -#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 6 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x06 #define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO 0x20 -#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO_SHIFT 5 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 #define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG 0x10 -#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG_SHIFT 4 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG_SHIFT 0x04 #define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2 0x08 -#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2_SHIFT 3 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2_SHIFT 0x03 #define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1 0x04 -#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1_SHIFT 2 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1_SHIFT 0x02 #define PALMAS_ENABLE2_RES_ASSIGN_REGEN2 0x02 -#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 1 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01 #define PALMAS_ENABLE2_RES_ASSIGN_REGEN1 0x01 -#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00 /* Bit definitions for ENABLE2_SMPS_ASSIGN */ #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10 0x80 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10_SHIFT 7 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10_SHIFT 0x07 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9 0x40 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9_SHIFT 6 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9_SHIFT 0x06 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8 0x20 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8_SHIFT 5 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8_SHIFT 0x05 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7 0x10 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7_SHIFT 4 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7_SHIFT 0x04 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6 0x08 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6_SHIFT 3 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6_SHIFT 0x03 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45 0x04 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45_SHIFT 2 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45_SHIFT 0x02 #define 
PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3 0x02 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 1 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x01 #define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12 0x01 -#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12_SHIFT 0 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12_SHIFT 0x00 /* Bit definitions for ENABLE2_LDO_ASSIGN1 */ #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8 0x80 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8_SHIFT 7 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8_SHIFT 0x07 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7 0x40 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7_SHIFT 6 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7_SHIFT 0x06 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6 0x20 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6_SHIFT 5 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6_SHIFT 0x05 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5 0x10 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5_SHIFT 4 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5_SHIFT 0x04 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4 0x08 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 3 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x03 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3 0x04 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3_SHIFT 2 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3_SHIFT 0x02 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2 0x02 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 1 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01 #define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1 0x01 -#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00 /* Bit definitions for ENABLE2_LDO_ASSIGN2 */ #define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB 0x04 -#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB_SHIFT 2 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 #define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN 0x02 -#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN_SHIFT 1 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN_SHIFT 0x01 #define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9 0x01 -#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9_SHIFT 0 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9_SHIFT 0x00 /* Bit definitions for REGEN3_CTRL */ #define PALMAS_REGEN3_CTRL_STATUS 0x10 -#define PALMAS_REGEN3_CTRL_STATUS_SHIFT 4 +#define PALMAS_REGEN3_CTRL_STATUS_SHIFT 0x04 #define PALMAS_REGEN3_CTRL_MODE_SLEEP 0x04 -#define PALMAS_REGEN3_CTRL_MODE_SLEEP_SHIFT 2 +#define PALMAS_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02 #define PALMAS_REGEN3_CTRL_MODE_ACTIVE 0x01 -#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0 +#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 /* Registers for function PAD_CONTROL */ -#define PALMAS_OD_OUTPUT_CTRL2 0x2 -#define PALMAS_POLARITY_CTRL2 0x3 -#define PALMAS_PU_PD_INPUT_CTRL1 0x4 -#define PALMAS_PU_PD_INPUT_CTRL2 0x5 -#define PALMAS_PU_PD_INPUT_CTRL3 0x6 -#define PALMAS_PU_PD_INPUT_CTRL5 0x7 -#define PALMAS_OD_OUTPUT_CTRL 0x8 -#define PALMAS_POLARITY_CTRL 0x9 -#define PALMAS_PRIMARY_SECONDARY_PAD1 0xA -#define PALMAS_PRIMARY_SECONDARY_PAD2 0xB -#define PALMAS_I2C_SPI 0xC -#define PALMAS_PU_PD_INPUT_CTRL4 0xD -#define PALMAS_PRIMARY_SECONDARY_PAD3 0xE -#define PALMAS_PRIMARY_SECONDARY_PAD4 0xF +#define PALMAS_OD_OUTPUT_CTRL2 0x02 +#define PALMAS_POLARITY_CTRL2 0x03 +#define PALMAS_PU_PD_INPUT_CTRL1 0x04 +#define PALMAS_PU_PD_INPUT_CTRL2 0x05 +#define PALMAS_PU_PD_INPUT_CTRL3 0x06 +#define PALMAS_PU_PD_INPUT_CTRL5 0x07 +#define PALMAS_OD_OUTPUT_CTRL 0x08 +#define PALMAS_POLARITY_CTRL 0x09 +#define PALMAS_PRIMARY_SECONDARY_PAD1 0x0A +#define PALMAS_PRIMARY_SECONDARY_PAD2 0x0B +#define PALMAS_I2C_SPI 0x0C +#define PALMAS_PU_PD_INPUT_CTRL4 0x0D +#define PALMAS_PRIMARY_SECONDARY_PAD3 0x0E +#define 
PALMAS_PRIMARY_SECONDARY_PAD4 0x0F /* Bit definitions for PU_PD_INPUT_CTRL1 */ #define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD 0x40 -#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD_SHIFT 6 +#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD_SHIFT 0x06 #define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU 0x20 -#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU_SHIFT 5 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU_SHIFT 0x05 #define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD 0x10 -#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD_SHIFT 4 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD_SHIFT 0x04 #define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD 0x04 -#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD_SHIFT 2 +#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD_SHIFT 0x02 #define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU 0x02 -#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU_SHIFT 1 +#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU_SHIFT 0x01 /* Bit definitions for PU_PD_INPUT_CTRL2 */ #define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU 0x20 -#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU_SHIFT 5 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU_SHIFT 0x05 #define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD 0x10 -#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD_SHIFT 4 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD_SHIFT 0x04 #define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU 0x08 -#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU_SHIFT 3 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU_SHIFT 0x03 #define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD 0x04 -#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD_SHIFT 2 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD_SHIFT 0x02 #define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU 0x02 -#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU_SHIFT 1 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU_SHIFT 0x01 #define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD 0x01 -#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD_SHIFT 0 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD_SHIFT 0x00 /* Bit definitions for PU_PD_INPUT_CTRL3 */ #define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD 0x40 -#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD_SHIFT 6 +#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD_SHIFT 0x06 #define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD 0x10 -#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD_SHIFT 4 +#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD_SHIFT 0x04 #define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD 0x04 -#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD_SHIFT 2 +#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD_SHIFT 0x02 #define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD 0x01 -#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD_SHIFT 0 +#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD_SHIFT 0x00 /* Bit definitions for OD_OUTPUT_CTRL */ #define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD 0x80 -#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD_SHIFT 7 +#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD_SHIFT 0x07 #define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD 0x40 -#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD_SHIFT 6 +#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD_SHIFT 0x06 #define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD 0x20 -#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD_SHIFT 5 +#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD_SHIFT 0x05 #define PALMAS_OD_OUTPUT_CTRL_INT_OD 0x08 -#define PALMAS_OD_OUTPUT_CTRL_INT_OD_SHIFT 3 +#define PALMAS_OD_OUTPUT_CTRL_INT_OD_SHIFT 0x03 /* Bit definitions for POLARITY_CTRL */ #define PALMAS_POLARITY_CTRL_INT_POLARITY 0x80 -#define PALMAS_POLARITY_CTRL_INT_POLARITY_SHIFT 7 +#define PALMAS_POLARITY_CTRL_INT_POLARITY_SHIFT 0x07 #define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY 0x40 -#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY_SHIFT 6 +#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY_SHIFT 0x06 #define 
PALMAS_POLARITY_CTRL_ENABLE1_POLARITY 0x20 -#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY_SHIFT 5 +#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY_SHIFT 0x05 #define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY 0x10 -#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY_SHIFT 4 +#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY_SHIFT 0x04 #define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY 0x08 -#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY_SHIFT 3 +#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY_SHIFT 0x03 #define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY 0x04 -#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY_SHIFT 2 +#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY_SHIFT 0x02 #define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY 0x02 -#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY_SHIFT 1 +#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY_SHIFT 0x01 #define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY 0x01 -#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY_SHIFT 0 +#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY_SHIFT 0x00 /* Bit definitions for PRIMARY_SECONDARY_PAD1 */ #define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3 0x80 -#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3_SHIFT 7 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3_SHIFT 0x07 #define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK 0x60 -#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT 5 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT 0x05 #define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK 0x18 -#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT 3 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT 0x03 #define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0 0x04 -#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0_SHIFT 2 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0_SHIFT 0x02 #define PALMAS_PRIMARY_SECONDARY_PAD1_VAC 0x02 -#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC_SHIFT 1 +#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC_SHIFT 0x01 #define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD 0x01 -#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD_SHIFT 0 +#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD_SHIFT 0x00 /* Bit definitions for PRIMARY_SECONDARY_PAD2 */ #define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK 0x30 -#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_SHIFT 4 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_SHIFT 0x04 #define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6 0x08 -#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6_SHIFT 3 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6_SHIFT 0x03 #define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0x06 -#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_SHIFT 1 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_SHIFT 0x01 #define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4 0x01 -#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4_SHIFT 0 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4_SHIFT 0x00 /* Bit definitions for I2C_SPI */ #define PALMAS_I2C_SPI_I2C2OTP_EN 0x80 -#define PALMAS_I2C_SPI_I2C2OTP_EN_SHIFT 7 +#define PALMAS_I2C_SPI_I2C2OTP_EN_SHIFT 0x07 #define PALMAS_I2C_SPI_I2C2OTP_PAGESEL 0x40 -#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL_SHIFT 6 +#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL_SHIFT 0x06 #define PALMAS_I2C_SPI_ID_I2C2 0x20 -#define PALMAS_I2C_SPI_ID_I2C2_SHIFT 5 +#define PALMAS_I2C_SPI_ID_I2C2_SHIFT 0x05 #define PALMAS_I2C_SPI_I2C_SPI 0x10 -#define PALMAS_I2C_SPI_I2C_SPI_SHIFT 4 -#define PALMAS_I2C_SPI_ID_I2C1_MASK 0x0f -#define PALMAS_I2C_SPI_ID_I2C1_SHIFT 0 +#define PALMAS_I2C_SPI_I2C_SPI_SHIFT 0x04 +#define PALMAS_I2C_SPI_ID_I2C1_MASK 0x0F +#define PALMAS_I2C_SPI_ID_I2C1_SHIFT 0x00 /* Bit definitions for PU_PD_INPUT_CTRL4 */ #define 
PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD 0x40 -#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD_SHIFT 6 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD_SHIFT 0x06 #define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD 0x10 -#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD_SHIFT 4 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD_SHIFT 0x04 #define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD 0x04 -#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD_SHIFT 2 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD_SHIFT 0x02 #define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD 0x01 -#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD_SHIFT 0 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD_SHIFT 0x00 /* Bit definitions for PRIMARY_SECONDARY_PAD3 */ #define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2 0x02 -#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2_SHIFT 1 +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2_SHIFT 0x01 #define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1 0x01 -#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1_SHIFT 0 +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1_SHIFT 0x00 /* Registers for function LED_PWM */ -#define PALMAS_LED_PERIOD_CTRL 0x0 -#define PALMAS_LED_CTRL 0x1 -#define PALMAS_PWM_CTRL1 0x2 -#define PALMAS_PWM_CTRL2 0x3 +#define PALMAS_LED_PERIOD_CTRL 0x00 +#define PALMAS_LED_CTRL 0x01 +#define PALMAS_PWM_CTRL1 0x02 +#define PALMAS_PWM_CTRL2 0x03 /* Bit definitions for LED_PERIOD_CTRL */ #define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_MASK 0x38 -#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_SHIFT 3 +#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_SHIFT 0x03 #define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_MASK 0x07 -#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_SHIFT 0 +#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_SHIFT 0x00 /* Bit definitions for LED_CTRL */ #define PALMAS_LED_CTRL_LED_2_SEQ 0x20 -#define PALMAS_LED_CTRL_LED_2_SEQ_SHIFT 5 +#define PALMAS_LED_CTRL_LED_2_SEQ_SHIFT 0x05 #define PALMAS_LED_CTRL_LED_1_SEQ 0x10 -#define PALMAS_LED_CTRL_LED_1_SEQ_SHIFT 4 +#define PALMAS_LED_CTRL_LED_1_SEQ_SHIFT 0x04 #define PALMAS_LED_CTRL_LED_2_ON_TIME_MASK 0x0c -#define PALMAS_LED_CTRL_LED_2_ON_TIME_SHIFT 2 +#define PALMAS_LED_CTRL_LED_2_ON_TIME_SHIFT 0x02 #define PALMAS_LED_CTRL_LED_1_ON_TIME_MASK 0x03 -#define PALMAS_LED_CTRL_LED_1_ON_TIME_SHIFT 0 +#define PALMAS_LED_CTRL_LED_1_ON_TIME_SHIFT 0x00 /* Bit definitions for PWM_CTRL1 */ #define PALMAS_PWM_CTRL1_PWM_FREQ_EN 0x02 -#define PALMAS_PWM_CTRL1_PWM_FREQ_EN_SHIFT 1 +#define PALMAS_PWM_CTRL1_PWM_FREQ_EN_SHIFT 0x01 #define PALMAS_PWM_CTRL1_PWM_FREQ_SEL 0x01 -#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL_SHIFT 0 +#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL_SHIFT 0x00 /* Bit definitions for PWM_CTRL2 */ -#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_MASK 0xff -#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_SHIFT 0 +#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_MASK 0xFF +#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_SHIFT 0x00 /* Registers for function INTERRUPT */ -#define PALMAS_INT1_STATUS 0x0 -#define PALMAS_INT1_MASK 0x1 -#define PALMAS_INT1_LINE_STATE 0x2 -#define PALMAS_INT1_EDGE_DETECT1_RESERVED 0x3 -#define PALMAS_INT1_EDGE_DETECT2_RESERVED 0x4 -#define PALMAS_INT2_STATUS 0x5 -#define PALMAS_INT2_MASK 0x6 -#define PALMAS_INT2_LINE_STATE 0x7 -#define PALMAS_INT2_EDGE_DETECT1_RESERVED 0x8 -#define PALMAS_INT2_EDGE_DETECT2_RESERVED 0x9 -#define PALMAS_INT3_STATUS 0xA -#define PALMAS_INT3_MASK 0xB -#define PALMAS_INT3_LINE_STATE 0xC -#define PALMAS_INT3_EDGE_DETECT1_RESERVED 0xD -#define PALMAS_INT3_EDGE_DETECT2_RESERVED 0xE -#define PALMAS_INT4_STATUS 0xF +#define PALMAS_INT1_STATUS 0x00 +#define PALMAS_INT1_MASK 0x01 +#define PALMAS_INT1_LINE_STATE 0x02 +#define 
PALMAS_INT1_EDGE_DETECT1_RESERVED 0x03 +#define PALMAS_INT1_EDGE_DETECT2_RESERVED 0x04 +#define PALMAS_INT2_STATUS 0x05 +#define PALMAS_INT2_MASK 0x06 +#define PALMAS_INT2_LINE_STATE 0x07 +#define PALMAS_INT2_EDGE_DETECT1_RESERVED 0x08 +#define PALMAS_INT2_EDGE_DETECT2_RESERVED 0x09 +#define PALMAS_INT3_STATUS 0x0A +#define PALMAS_INT3_MASK 0x0B +#define PALMAS_INT3_LINE_STATE 0x0C +#define PALMAS_INT3_EDGE_DETECT1_RESERVED 0x0D +#define PALMAS_INT3_EDGE_DETECT2_RESERVED 0x0E +#define PALMAS_INT4_STATUS 0x0F #define PALMAS_INT4_MASK 0x10 #define PALMAS_INT4_LINE_STATE 0x11 #define PALMAS_INT4_EDGE_DETECT1 0x12 @@ -1966,276 +1966,276 @@ enum usb_irq_events { /* Bit definitions for INT1_STATUS */ #define PALMAS_INT1_STATUS_VBAT_MON 0x80 -#define PALMAS_INT1_STATUS_VBAT_MON_SHIFT 7 +#define PALMAS_INT1_STATUS_VBAT_MON_SHIFT 0x07 #define PALMAS_INT1_STATUS_VSYS_MON 0x40 -#define PALMAS_INT1_STATUS_VSYS_MON_SHIFT 6 +#define PALMAS_INT1_STATUS_VSYS_MON_SHIFT 0x06 #define PALMAS_INT1_STATUS_HOTDIE 0x20 -#define PALMAS_INT1_STATUS_HOTDIE_SHIFT 5 +#define PALMAS_INT1_STATUS_HOTDIE_SHIFT 0x05 #define PALMAS_INT1_STATUS_PWRDOWN 0x10 -#define PALMAS_INT1_STATUS_PWRDOWN_SHIFT 4 +#define PALMAS_INT1_STATUS_PWRDOWN_SHIFT 0x04 #define PALMAS_INT1_STATUS_RPWRON 0x08 -#define PALMAS_INT1_STATUS_RPWRON_SHIFT 3 +#define PALMAS_INT1_STATUS_RPWRON_SHIFT 0x03 #define PALMAS_INT1_STATUS_LONG_PRESS_KEY 0x04 -#define PALMAS_INT1_STATUS_LONG_PRESS_KEY_SHIFT 2 +#define PALMAS_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02 #define PALMAS_INT1_STATUS_PWRON 0x02 -#define PALMAS_INT1_STATUS_PWRON_SHIFT 1 +#define PALMAS_INT1_STATUS_PWRON_SHIFT 0x01 #define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV 0x01 -#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV_SHIFT 0 +#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV_SHIFT 0x00 /* Bit definitions for INT1_MASK */ #define PALMAS_INT1_MASK_VBAT_MON 0x80 -#define PALMAS_INT1_MASK_VBAT_MON_SHIFT 7 +#define PALMAS_INT1_MASK_VBAT_MON_SHIFT 0x07 #define PALMAS_INT1_MASK_VSYS_MON 0x40 -#define PALMAS_INT1_MASK_VSYS_MON_SHIFT 6 +#define PALMAS_INT1_MASK_VSYS_MON_SHIFT 0x06 #define PALMAS_INT1_MASK_HOTDIE 0x20 -#define PALMAS_INT1_MASK_HOTDIE_SHIFT 5 +#define PALMAS_INT1_MASK_HOTDIE_SHIFT 0x05 #define PALMAS_INT1_MASK_PWRDOWN 0x10 -#define PALMAS_INT1_MASK_PWRDOWN_SHIFT 4 +#define PALMAS_INT1_MASK_PWRDOWN_SHIFT 0x04 #define PALMAS_INT1_MASK_RPWRON 0x08 -#define PALMAS_INT1_MASK_RPWRON_SHIFT 3 +#define PALMAS_INT1_MASK_RPWRON_SHIFT 0x03 #define PALMAS_INT1_MASK_LONG_PRESS_KEY 0x04 -#define PALMAS_INT1_MASK_LONG_PRESS_KEY_SHIFT 2 +#define PALMAS_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02 #define PALMAS_INT1_MASK_PWRON 0x02 -#define PALMAS_INT1_MASK_PWRON_SHIFT 1 +#define PALMAS_INT1_MASK_PWRON_SHIFT 0x01 #define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV 0x01 -#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV_SHIFT 0 +#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV_SHIFT 0x00 /* Bit definitions for INT1_LINE_STATE */ #define PALMAS_INT1_LINE_STATE_VBAT_MON 0x80 -#define PALMAS_INT1_LINE_STATE_VBAT_MON_SHIFT 7 +#define PALMAS_INT1_LINE_STATE_VBAT_MON_SHIFT 0x07 #define PALMAS_INT1_LINE_STATE_VSYS_MON 0x40 -#define PALMAS_INT1_LINE_STATE_VSYS_MON_SHIFT 6 +#define PALMAS_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06 #define PALMAS_INT1_LINE_STATE_HOTDIE 0x20 -#define PALMAS_INT1_LINE_STATE_HOTDIE_SHIFT 5 +#define PALMAS_INT1_LINE_STATE_HOTDIE_SHIFT 0x05 #define PALMAS_INT1_LINE_STATE_PWRDOWN 0x10 -#define PALMAS_INT1_LINE_STATE_PWRDOWN_SHIFT 4 +#define PALMAS_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04 #define PALMAS_INT1_LINE_STATE_RPWRON 
0x08 -#define PALMAS_INT1_LINE_STATE_RPWRON_SHIFT 3 +#define PALMAS_INT1_LINE_STATE_RPWRON_SHIFT 0x03 #define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY 0x04 -#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 2 +#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02 #define PALMAS_INT1_LINE_STATE_PWRON 0x02 -#define PALMAS_INT1_LINE_STATE_PWRON_SHIFT 1 +#define PALMAS_INT1_LINE_STATE_PWRON_SHIFT 0x01 #define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV 0x01 -#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV_SHIFT 0 +#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV_SHIFT 0x00 /* Bit definitions for INT2_STATUS */ #define PALMAS_INT2_STATUS_VAC_ACOK 0x80 -#define PALMAS_INT2_STATUS_VAC_ACOK_SHIFT 7 +#define PALMAS_INT2_STATUS_VAC_ACOK_SHIFT 0x07 #define PALMAS_INT2_STATUS_SHORT 0x40 -#define PALMAS_INT2_STATUS_SHORT_SHIFT 6 +#define PALMAS_INT2_STATUS_SHORT_SHIFT 0x06 #define PALMAS_INT2_STATUS_FBI_BB 0x20 -#define PALMAS_INT2_STATUS_FBI_BB_SHIFT 5 +#define PALMAS_INT2_STATUS_FBI_BB_SHIFT 0x05 #define PALMAS_INT2_STATUS_RESET_IN 0x10 -#define PALMAS_INT2_STATUS_RESET_IN_SHIFT 4 +#define PALMAS_INT2_STATUS_RESET_IN_SHIFT 0x04 #define PALMAS_INT2_STATUS_BATREMOVAL 0x08 -#define PALMAS_INT2_STATUS_BATREMOVAL_SHIFT 3 +#define PALMAS_INT2_STATUS_BATREMOVAL_SHIFT 0x03 #define PALMAS_INT2_STATUS_WDT 0x04 -#define PALMAS_INT2_STATUS_WDT_SHIFT 2 +#define PALMAS_INT2_STATUS_WDT_SHIFT 0x02 #define PALMAS_INT2_STATUS_RTC_TIMER 0x02 -#define PALMAS_INT2_STATUS_RTC_TIMER_SHIFT 1 +#define PALMAS_INT2_STATUS_RTC_TIMER_SHIFT 0x01 #define PALMAS_INT2_STATUS_RTC_ALARM 0x01 -#define PALMAS_INT2_STATUS_RTC_ALARM_SHIFT 0 +#define PALMAS_INT2_STATUS_RTC_ALARM_SHIFT 0x00 /* Bit definitions for INT2_MASK */ #define PALMAS_INT2_MASK_VAC_ACOK 0x80 -#define PALMAS_INT2_MASK_VAC_ACOK_SHIFT 7 +#define PALMAS_INT2_MASK_VAC_ACOK_SHIFT 0x07 #define PALMAS_INT2_MASK_SHORT 0x40 -#define PALMAS_INT2_MASK_SHORT_SHIFT 6 +#define PALMAS_INT2_MASK_SHORT_SHIFT 0x06 #define PALMAS_INT2_MASK_FBI_BB 0x20 -#define PALMAS_INT2_MASK_FBI_BB_SHIFT 5 +#define PALMAS_INT2_MASK_FBI_BB_SHIFT 0x05 #define PALMAS_INT2_MASK_RESET_IN 0x10 -#define PALMAS_INT2_MASK_RESET_IN_SHIFT 4 +#define PALMAS_INT2_MASK_RESET_IN_SHIFT 0x04 #define PALMAS_INT2_MASK_BATREMOVAL 0x08 -#define PALMAS_INT2_MASK_BATREMOVAL_SHIFT 3 +#define PALMAS_INT2_MASK_BATREMOVAL_SHIFT 0x03 #define PALMAS_INT2_MASK_WDT 0x04 -#define PALMAS_INT2_MASK_WDT_SHIFT 2 +#define PALMAS_INT2_MASK_WDT_SHIFT 0x02 #define PALMAS_INT2_MASK_RTC_TIMER 0x02 -#define PALMAS_INT2_MASK_RTC_TIMER_SHIFT 1 +#define PALMAS_INT2_MASK_RTC_TIMER_SHIFT 0x01 #define PALMAS_INT2_MASK_RTC_ALARM 0x01 -#define PALMAS_INT2_MASK_RTC_ALARM_SHIFT 0 +#define PALMAS_INT2_MASK_RTC_ALARM_SHIFT 0x00 /* Bit definitions for INT2_LINE_STATE */ #define PALMAS_INT2_LINE_STATE_VAC_ACOK 0x80 -#define PALMAS_INT2_LINE_STATE_VAC_ACOK_SHIFT 7 +#define PALMAS_INT2_LINE_STATE_VAC_ACOK_SHIFT 0x07 #define PALMAS_INT2_LINE_STATE_SHORT 0x40 -#define PALMAS_INT2_LINE_STATE_SHORT_SHIFT 6 +#define PALMAS_INT2_LINE_STATE_SHORT_SHIFT 0x06 #define PALMAS_INT2_LINE_STATE_FBI_BB 0x20 -#define PALMAS_INT2_LINE_STATE_FBI_BB_SHIFT 5 +#define PALMAS_INT2_LINE_STATE_FBI_BB_SHIFT 0x05 #define PALMAS_INT2_LINE_STATE_RESET_IN 0x10 -#define PALMAS_INT2_LINE_STATE_RESET_IN_SHIFT 4 +#define PALMAS_INT2_LINE_STATE_RESET_IN_SHIFT 0x04 #define PALMAS_INT2_LINE_STATE_BATREMOVAL 0x08 -#define PALMAS_INT2_LINE_STATE_BATREMOVAL_SHIFT 3 +#define PALMAS_INT2_LINE_STATE_BATREMOVAL_SHIFT 0x03 #define PALMAS_INT2_LINE_STATE_WDT 0x04 -#define 
PALMAS_INT2_LINE_STATE_WDT_SHIFT 2 +#define PALMAS_INT2_LINE_STATE_WDT_SHIFT 0x02 #define PALMAS_INT2_LINE_STATE_RTC_TIMER 0x02 -#define PALMAS_INT2_LINE_STATE_RTC_TIMER_SHIFT 1 +#define PALMAS_INT2_LINE_STATE_RTC_TIMER_SHIFT 0x01 #define PALMAS_INT2_LINE_STATE_RTC_ALARM 0x01 -#define PALMAS_INT2_LINE_STATE_RTC_ALARM_SHIFT 0 +#define PALMAS_INT2_LINE_STATE_RTC_ALARM_SHIFT 0x00 /* Bit definitions for INT3_STATUS */ #define PALMAS_INT3_STATUS_VBUS 0x80 -#define PALMAS_INT3_STATUS_VBUS_SHIFT 7 +#define PALMAS_INT3_STATUS_VBUS_SHIFT 0x07 #define PALMAS_INT3_STATUS_VBUS_OTG 0x40 -#define PALMAS_INT3_STATUS_VBUS_OTG_SHIFT 6 +#define PALMAS_INT3_STATUS_VBUS_OTG_SHIFT 0x06 #define PALMAS_INT3_STATUS_ID 0x20 -#define PALMAS_INT3_STATUS_ID_SHIFT 5 +#define PALMAS_INT3_STATUS_ID_SHIFT 0x05 #define PALMAS_INT3_STATUS_ID_OTG 0x10 -#define PALMAS_INT3_STATUS_ID_OTG_SHIFT 4 +#define PALMAS_INT3_STATUS_ID_OTG_SHIFT 0x04 #define PALMAS_INT3_STATUS_GPADC_EOC_RT 0x08 -#define PALMAS_INT3_STATUS_GPADC_EOC_RT_SHIFT 3 +#define PALMAS_INT3_STATUS_GPADC_EOC_RT_SHIFT 0x03 #define PALMAS_INT3_STATUS_GPADC_EOC_SW 0x04 -#define PALMAS_INT3_STATUS_GPADC_EOC_SW_SHIFT 2 +#define PALMAS_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02 #define PALMAS_INT3_STATUS_GPADC_AUTO_1 0x02 -#define PALMAS_INT3_STATUS_GPADC_AUTO_1_SHIFT 1 +#define PALMAS_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01 #define PALMAS_INT3_STATUS_GPADC_AUTO_0 0x01 -#define PALMAS_INT3_STATUS_GPADC_AUTO_0_SHIFT 0 +#define PALMAS_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00 /* Bit definitions for INT3_MASK */ #define PALMAS_INT3_MASK_VBUS 0x80 -#define PALMAS_INT3_MASK_VBUS_SHIFT 7 +#define PALMAS_INT3_MASK_VBUS_SHIFT 0x07 #define PALMAS_INT3_MASK_VBUS_OTG 0x40 -#define PALMAS_INT3_MASK_VBUS_OTG_SHIFT 6 +#define PALMAS_INT3_MASK_VBUS_OTG_SHIFT 0x06 #define PALMAS_INT3_MASK_ID 0x20 -#define PALMAS_INT3_MASK_ID_SHIFT 5 +#define PALMAS_INT3_MASK_ID_SHIFT 0x05 #define PALMAS_INT3_MASK_ID_OTG 0x10 -#define PALMAS_INT3_MASK_ID_OTG_SHIFT 4 +#define PALMAS_INT3_MASK_ID_OTG_SHIFT 0x04 #define PALMAS_INT3_MASK_GPADC_EOC_RT 0x08 -#define PALMAS_INT3_MASK_GPADC_EOC_RT_SHIFT 3 +#define PALMAS_INT3_MASK_GPADC_EOC_RT_SHIFT 0x03 #define PALMAS_INT3_MASK_GPADC_EOC_SW 0x04 -#define PALMAS_INT3_MASK_GPADC_EOC_SW_SHIFT 2 +#define PALMAS_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02 #define PALMAS_INT3_MASK_GPADC_AUTO_1 0x02 -#define PALMAS_INT3_MASK_GPADC_AUTO_1_SHIFT 1 +#define PALMAS_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01 #define PALMAS_INT3_MASK_GPADC_AUTO_0 0x01 -#define PALMAS_INT3_MASK_GPADC_AUTO_0_SHIFT 0 +#define PALMAS_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00 /* Bit definitions for INT3_LINE_STATE */ #define PALMAS_INT3_LINE_STATE_VBUS 0x80 -#define PALMAS_INT3_LINE_STATE_VBUS_SHIFT 7 +#define PALMAS_INT3_LINE_STATE_VBUS_SHIFT 0x07 #define PALMAS_INT3_LINE_STATE_VBUS_OTG 0x40 -#define PALMAS_INT3_LINE_STATE_VBUS_OTG_SHIFT 6 +#define PALMAS_INT3_LINE_STATE_VBUS_OTG_SHIFT 0x06 #define PALMAS_INT3_LINE_STATE_ID 0x20 -#define PALMAS_INT3_LINE_STATE_ID_SHIFT 5 +#define PALMAS_INT3_LINE_STATE_ID_SHIFT 0x05 #define PALMAS_INT3_LINE_STATE_ID_OTG 0x10 -#define PALMAS_INT3_LINE_STATE_ID_OTG_SHIFT 4 +#define PALMAS_INT3_LINE_STATE_ID_OTG_SHIFT 0x04 #define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT 0x08 -#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT_SHIFT 3 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT_SHIFT 0x03 #define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW 0x04 -#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 2 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02 #define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1 0x02 -#define 
PALMAS_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 1 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01 #define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0 0x01 -#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00 /* Bit definitions for INT4_STATUS */ #define PALMAS_INT4_STATUS_GPIO_7 0x80 -#define PALMAS_INT4_STATUS_GPIO_7_SHIFT 7 +#define PALMAS_INT4_STATUS_GPIO_7_SHIFT 0x07 #define PALMAS_INT4_STATUS_GPIO_6 0x40 -#define PALMAS_INT4_STATUS_GPIO_6_SHIFT 6 +#define PALMAS_INT4_STATUS_GPIO_6_SHIFT 0x06 #define PALMAS_INT4_STATUS_GPIO_5 0x20 -#define PALMAS_INT4_STATUS_GPIO_5_SHIFT 5 +#define PALMAS_INT4_STATUS_GPIO_5_SHIFT 0x05 #define PALMAS_INT4_STATUS_GPIO_4 0x10 -#define PALMAS_INT4_STATUS_GPIO_4_SHIFT 4 +#define PALMAS_INT4_STATUS_GPIO_4_SHIFT 0x04 #define PALMAS_INT4_STATUS_GPIO_3 0x08 -#define PALMAS_INT4_STATUS_GPIO_3_SHIFT 3 +#define PALMAS_INT4_STATUS_GPIO_3_SHIFT 0x03 #define PALMAS_INT4_STATUS_GPIO_2 0x04 -#define PALMAS_INT4_STATUS_GPIO_2_SHIFT 2 +#define PALMAS_INT4_STATUS_GPIO_2_SHIFT 0x02 #define PALMAS_INT4_STATUS_GPIO_1 0x02 -#define PALMAS_INT4_STATUS_GPIO_1_SHIFT 1 +#define PALMAS_INT4_STATUS_GPIO_1_SHIFT 0x01 #define PALMAS_INT4_STATUS_GPIO_0 0x01 -#define PALMAS_INT4_STATUS_GPIO_0_SHIFT 0 +#define PALMAS_INT4_STATUS_GPIO_0_SHIFT 0x00 /* Bit definitions for INT4_MASK */ #define PALMAS_INT4_MASK_GPIO_7 0x80 -#define PALMAS_INT4_MASK_GPIO_7_SHIFT 7 +#define PALMAS_INT4_MASK_GPIO_7_SHIFT 0x07 #define PALMAS_INT4_MASK_GPIO_6 0x40 -#define PALMAS_INT4_MASK_GPIO_6_SHIFT 6 +#define PALMAS_INT4_MASK_GPIO_6_SHIFT 0x06 #define PALMAS_INT4_MASK_GPIO_5 0x20 -#define PALMAS_INT4_MASK_GPIO_5_SHIFT 5 +#define PALMAS_INT4_MASK_GPIO_5_SHIFT 0x05 #define PALMAS_INT4_MASK_GPIO_4 0x10 -#define PALMAS_INT4_MASK_GPIO_4_SHIFT 4 +#define PALMAS_INT4_MASK_GPIO_4_SHIFT 0x04 #define PALMAS_INT4_MASK_GPIO_3 0x08 -#define PALMAS_INT4_MASK_GPIO_3_SHIFT 3 +#define PALMAS_INT4_MASK_GPIO_3_SHIFT 0x03 #define PALMAS_INT4_MASK_GPIO_2 0x04 -#define PALMAS_INT4_MASK_GPIO_2_SHIFT 2 +#define PALMAS_INT4_MASK_GPIO_2_SHIFT 0x02 #define PALMAS_INT4_MASK_GPIO_1 0x02 -#define PALMAS_INT4_MASK_GPIO_1_SHIFT 1 +#define PALMAS_INT4_MASK_GPIO_1_SHIFT 0x01 #define PALMAS_INT4_MASK_GPIO_0 0x01 -#define PALMAS_INT4_MASK_GPIO_0_SHIFT 0 +#define PALMAS_INT4_MASK_GPIO_0_SHIFT 0x00 /* Bit definitions for INT4_LINE_STATE */ #define PALMAS_INT4_LINE_STATE_GPIO_7 0x80 -#define PALMAS_INT4_LINE_STATE_GPIO_7_SHIFT 7 +#define PALMAS_INT4_LINE_STATE_GPIO_7_SHIFT 0x07 #define PALMAS_INT4_LINE_STATE_GPIO_6 0x40 -#define PALMAS_INT4_LINE_STATE_GPIO_6_SHIFT 6 +#define PALMAS_INT4_LINE_STATE_GPIO_6_SHIFT 0x06 #define PALMAS_INT4_LINE_STATE_GPIO_5 0x20 -#define PALMAS_INT4_LINE_STATE_GPIO_5_SHIFT 5 +#define PALMAS_INT4_LINE_STATE_GPIO_5_SHIFT 0x05 #define PALMAS_INT4_LINE_STATE_GPIO_4 0x10 -#define PALMAS_INT4_LINE_STATE_GPIO_4_SHIFT 4 +#define PALMAS_INT4_LINE_STATE_GPIO_4_SHIFT 0x04 #define PALMAS_INT4_LINE_STATE_GPIO_3 0x08 -#define PALMAS_INT4_LINE_STATE_GPIO_3_SHIFT 3 +#define PALMAS_INT4_LINE_STATE_GPIO_3_SHIFT 0x03 #define PALMAS_INT4_LINE_STATE_GPIO_2 0x04 -#define PALMAS_INT4_LINE_STATE_GPIO_2_SHIFT 2 +#define PALMAS_INT4_LINE_STATE_GPIO_2_SHIFT 0x02 #define PALMAS_INT4_LINE_STATE_GPIO_1 0x02 -#define PALMAS_INT4_LINE_STATE_GPIO_1_SHIFT 1 +#define PALMAS_INT4_LINE_STATE_GPIO_1_SHIFT 0x01 #define PALMAS_INT4_LINE_STATE_GPIO_0 0x01 -#define PALMAS_INT4_LINE_STATE_GPIO_0_SHIFT 0 +#define PALMAS_INT4_LINE_STATE_GPIO_0_SHIFT 0x00 /* Bit definitions for INT4_EDGE_DETECT1 */ #define 
PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 7 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07 #define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 6 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06 #define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 5 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05 #define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 4 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04 #define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 3 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03 #define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 2 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02 #define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 1 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01 #define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01 -#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00 /* Bit definitions for INT4_EDGE_DETECT2 */ #define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING 0x80 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING_SHIFT 7 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING_SHIFT 0x07 #define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING 0x40 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING_SHIFT 6 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING_SHIFT 0x06 #define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 5 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05 #define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 4 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04 #define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 3 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03 #define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 2 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02 #define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 1 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01 #define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01 -#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00 /* Bit definitions for INT_CTRL */ #define PALMAS_INT_CTRL_INT_PENDING 0x04 -#define PALMAS_INT_CTRL_INT_PENDING_SHIFT 2 +#define PALMAS_INT_CTRL_INT_PENDING_SHIFT 0x02 #define PALMAS_INT_CTRL_INT_CLEAR 0x01 -#define PALMAS_INT_CTRL_INT_CLEAR_SHIFT 0 +#define PALMAS_INT_CTRL_INT_CLEAR_SHIFT 0x00 /* Registers for function USB_OTG */ -#define PALMAS_USB_WAKEUP 0x3 -#define PALMAS_USB_VBUS_CTRL_SET 0x4 -#define PALMAS_USB_VBUS_CTRL_CLR 0x5 -#define PALMAS_USB_ID_CTRL_SET 0x6 -#define PALMAS_USB_ID_CTRL_CLEAR 0x7 -#define PALMAS_USB_VBUS_INT_SRC 0x8 -#define PALMAS_USB_VBUS_INT_LATCH_SET 0x9 -#define PALMAS_USB_VBUS_INT_LATCH_CLR 0xA -#define PALMAS_USB_VBUS_INT_EN_LO_SET 0xB -#define PALMAS_USB_VBUS_INT_EN_LO_CLR 0xC -#define PALMAS_USB_VBUS_INT_EN_HI_SET 0xD -#define PALMAS_USB_VBUS_INT_EN_HI_CLR 0xE -#define 
PALMAS_USB_ID_INT_SRC 0xF +#define PALMAS_USB_WAKEUP 0x03 +#define PALMAS_USB_VBUS_CTRL_SET 0x04 +#define PALMAS_USB_VBUS_CTRL_CLR 0x05 +#define PALMAS_USB_ID_CTRL_SET 0x06 +#define PALMAS_USB_ID_CTRL_CLEAR 0x07 +#define PALMAS_USB_VBUS_INT_SRC 0x08 +#define PALMAS_USB_VBUS_INT_LATCH_SET 0x09 +#define PALMAS_USB_VBUS_INT_LATCH_CLR 0x0A +#define PALMAS_USB_VBUS_INT_EN_LO_SET 0x0B +#define PALMAS_USB_VBUS_INT_EN_LO_CLR 0x0C +#define PALMAS_USB_VBUS_INT_EN_HI_SET 0x0D +#define PALMAS_USB_VBUS_INT_EN_HI_CLR 0x0E +#define PALMAS_USB_ID_INT_SRC 0x0F #define PALMAS_USB_ID_INT_LATCH_SET 0x10 #define PALMAS_USB_ID_INT_LATCH_CLR 0x11 #define PALMAS_USB_ID_INT_EN_LO_SET 0x12 @@ -2250,306 +2250,306 @@ enum usb_irq_events { /* Bit definitions for USB_WAKEUP */ #define PALMAS_USB_WAKEUP_ID_WK_UP_COMP 0x01 -#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP_SHIFT 0 +#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP_SHIFT 0x00 /* Bit definitions for USB_VBUS_CTRL_SET */ #define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS 0x80 -#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS_SHIFT 7 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS_SHIFT 0x07 #define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG 0x20 -#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG_SHIFT 5 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG_SHIFT 0x05 #define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC 0x10 -#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC_SHIFT 4 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC_SHIFT 0x04 #define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK 0x08 -#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK_SHIFT 3 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK_SHIFT 0x03 #define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP 0x04 -#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP_SHIFT 2 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP_SHIFT 0x02 /* Bit definitions for USB_VBUS_CTRL_CLR */ #define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS 0x80 -#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS_SHIFT 7 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS_SHIFT 0x07 #define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG 0x20 -#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG_SHIFT 5 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG_SHIFT 0x05 #define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC 0x10 -#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC_SHIFT 4 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC_SHIFT 0x04 #define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK 0x08 -#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK_SHIFT 3 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK_SHIFT 0x03 #define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP 0x04 -#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP_SHIFT 2 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP_SHIFT 0x02 /* Bit definitions for USB_ID_CTRL_SET */ #define PALMAS_USB_ID_CTRL_SET_ID_PU_220K 0x80 -#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K_SHIFT 7 +#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K_SHIFT 0x07 #define PALMAS_USB_ID_CTRL_SET_ID_PU_100K 0x40 -#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K_SHIFT 6 +#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K_SHIFT 0x06 #define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV 0x20 -#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV_SHIFT 5 +#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV_SHIFT 0x05 #define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U 0x10 -#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U_SHIFT 4 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U_SHIFT 0x04 #define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U 0x08 -#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U_SHIFT 3 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U_SHIFT 0x03 #define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP 0x04 -#define 
PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP_SHIFT 2 +#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP_SHIFT 0x02 /* Bit definitions for USB_ID_CTRL_CLEAR */ #define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K 0x80 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K_SHIFT 7 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K_SHIFT 0x07 #define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K 0x40 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K_SHIFT 6 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K_SHIFT 0x06 #define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV 0x20 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV_SHIFT 5 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV_SHIFT 0x05 #define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U 0x10 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U_SHIFT 4 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U_SHIFT 0x04 #define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U 0x08 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U_SHIFT 3 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U_SHIFT 0x03 #define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP 0x04 -#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP_SHIFT 2 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP_SHIFT 0x02 /* Bit definitions for USB_VBUS_INT_SRC */ #define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_SRC_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_SRC_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_LATCH_SET */ #define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_LATCH_SET_ADP 0x10 -#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP_SHIFT 4 +#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP_SHIFT 0x04 #define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END 0x01 -#define 
PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_LATCH_CLR */ #define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP 0x10 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP_SHIFT 4 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP_SHIFT 0x04 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_EN_LO_SET */ #define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_EN_LO_CLR */ #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD_SHIFT 0x03 #define 
PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_EN_HI_SET */ #define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP 0x10 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP_SHIFT 4 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP_SHIFT 0x04 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_VBUS_INT_EN_HI_CLR */ #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD 0x80 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD_SHIFT 7 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD_SHIFT 0x07 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB 0x40 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB_SHIFT 6 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB_SHIFT 0x06 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS 0x20 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS_SHIFT 5 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS_SHIFT 0x05 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP 0x10 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP_SHIFT 4 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP_SHIFT 0x04 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD 0x08 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD_SHIFT 3 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD_SHIFT 0x03 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD 0x04 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD_SHIFT 2 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD_SHIFT 0x02 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD 0x02 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD_SHIFT 1 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD_SHIFT 0x01 #define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END 0x01 -#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END_SHIFT 0 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END_SHIFT 0x00 /* Bit definitions for USB_ID_INT_SRC */ #define PALMAS_USB_ID_INT_SRC_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_SRC_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_SRC_ID_FLOAT_SHIFT 0x04 #define 
PALMAS_USB_ID_INT_SRC_ID_A 0x08 -#define PALMAS_USB_ID_INT_SRC_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_SRC_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_SRC_ID_B 0x04 -#define PALMAS_USB_ID_INT_SRC_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_SRC_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_SRC_ID_C 0x02 -#define PALMAS_USB_ID_INT_SRC_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_SRC_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_SRC_ID_GND 0x01 -#define PALMAS_USB_ID_INT_SRC_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_SRC_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_LATCH_SET */ #define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_LATCH_SET_ID_A 0x08 -#define PALMAS_USB_ID_INT_LATCH_SET_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_LATCH_SET_ID_B 0x04 -#define PALMAS_USB_ID_INT_LATCH_SET_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_LATCH_SET_ID_C 0x02 -#define PALMAS_USB_ID_INT_LATCH_SET_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_LATCH_SET_ID_GND 0x01 -#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_LATCH_CLR */ #define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_LATCH_CLR_ID_A 0x08 -#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_LATCH_CLR_ID_B 0x04 -#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_LATCH_CLR_ID_C 0x02 -#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND 0x01 -#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_EN_LO_SET */ #define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_EN_LO_SET_ID_A 0x08 -#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_EN_LO_SET_ID_B 0x04 -#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_EN_LO_SET_ID_C 0x02 -#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND 0x01 -#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_EN_LO_CLR */ #define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A 0x08 -#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B 0x04 -#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C 0x02 -#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C_SHIFT 1 
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND 0x01 -#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_EN_HI_SET */ #define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_EN_HI_SET_ID_A 0x08 -#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_EN_HI_SET_ID_B 0x04 -#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_EN_HI_SET_ID_C 0x02 -#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND 0x01 -#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND_SHIFT 0x00 /* Bit definitions for USB_ID_INT_EN_HI_CLR */ #define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT 0x10 -#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT_SHIFT 4 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT_SHIFT 0x04 #define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A 0x08 -#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A_SHIFT 3 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A_SHIFT 0x03 #define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B 0x04 -#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B_SHIFT 2 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B_SHIFT 0x02 #define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C 0x02 -#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C_SHIFT 1 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C_SHIFT 0x01 #define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND 0x01 -#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND_SHIFT 0 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND_SHIFT 0x00 /* Bit definitions for USB_OTG_ADP_CTRL */ #define PALMAS_USB_OTG_ADP_CTRL_ADP_EN 0x04 -#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN_SHIFT 2 +#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN_SHIFT 0x02 #define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_MASK 0x03 -#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_SHIFT 0 +#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_SHIFT 0x00 /* Bit definitions for USB_OTG_ADP_HIGH */ -#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_MASK 0xff -#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_SHIFT 0 +#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_MASK 0xFF +#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_SHIFT 0x00 /* Bit definitions for USB_OTG_ADP_LOW */ -#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_MASK 0xff -#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_SHIFT 0 +#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_MASK 0xFF +#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_SHIFT 0x00 /* Bit definitions for USB_OTG_ADP_RISE */ -#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_MASK 0xff -#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_SHIFT 0 +#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_MASK 0xFF +#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_SHIFT 0x00 /* Bit definitions for USB_OTG_REVISION */ #define PALMAS_USB_OTG_REVISION_OTG_REV 0x01 -#define PALMAS_USB_OTG_REVISION_OTG_REV_SHIFT 0 +#define PALMAS_USB_OTG_REVISION_OTG_REV_SHIFT 0x00 /* Registers for function VIBRATOR */ -#define PALMAS_VIBRA_CTRL 0x0 +#define PALMAS_VIBRA_CTRL 0x00 /* Bit definitions for VIBRA_CTRL */ #define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_MASK 0x06 -#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_SHIFT 1 +#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_SHIFT 0x01 #define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL 0x01 -#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL_SHIFT 0 +#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL_SHIFT 0x00 /* 
Registers for function GPIO */ -#define PALMAS_GPIO_DATA_IN 0x0 -#define PALMAS_GPIO_DATA_DIR 0x1 -#define PALMAS_GPIO_DATA_OUT 0x2 -#define PALMAS_GPIO_DEBOUNCE_EN 0x3 -#define PALMAS_GPIO_CLEAR_DATA_OUT 0x4 -#define PALMAS_GPIO_SET_DATA_OUT 0x5 -#define PALMAS_PU_PD_GPIO_CTRL1 0x6 -#define PALMAS_PU_PD_GPIO_CTRL2 0x7 -#define PALMAS_OD_OUTPUT_GPIO_CTRL 0x8 -#define PALMAS_GPIO_DATA_IN2 0x9 +#define PALMAS_GPIO_DATA_IN 0x00 +#define PALMAS_GPIO_DATA_DIR 0x01 +#define PALMAS_GPIO_DATA_OUT 0x02 +#define PALMAS_GPIO_DEBOUNCE_EN 0x03 +#define PALMAS_GPIO_CLEAR_DATA_OUT 0x04 +#define PALMAS_GPIO_SET_DATA_OUT 0x05 +#define PALMAS_PU_PD_GPIO_CTRL1 0x06 +#define PALMAS_PU_PD_GPIO_CTRL2 0x07 +#define PALMAS_OD_OUTPUT_GPIO_CTRL 0x08 +#define PALMAS_GPIO_DATA_IN2 0x09 #define PALMAS_GPIO_DATA_DIR2 0x0A #define PALMAS_GPIO_DATA_OUT2 0x0B #define PALMAS_GPIO_DEBOUNCE_EN2 0x0C @@ -2561,167 +2561,167 @@ enum usb_irq_events { /* Bit definitions for GPIO_DATA_IN */ #define PALMAS_GPIO_DATA_IN_GPIO_7_IN 0x80 -#define PALMAS_GPIO_DATA_IN_GPIO_7_IN_SHIFT 7 +#define PALMAS_GPIO_DATA_IN_GPIO_7_IN_SHIFT 0x07 #define PALMAS_GPIO_DATA_IN_GPIO_6_IN 0x40 -#define PALMAS_GPIO_DATA_IN_GPIO_6_IN_SHIFT 6 +#define PALMAS_GPIO_DATA_IN_GPIO_6_IN_SHIFT 0x06 #define PALMAS_GPIO_DATA_IN_GPIO_5_IN 0x20 -#define PALMAS_GPIO_DATA_IN_GPIO_5_IN_SHIFT 5 +#define PALMAS_GPIO_DATA_IN_GPIO_5_IN_SHIFT 0x05 #define PALMAS_GPIO_DATA_IN_GPIO_4_IN 0x10 -#define PALMAS_GPIO_DATA_IN_GPIO_4_IN_SHIFT 4 +#define PALMAS_GPIO_DATA_IN_GPIO_4_IN_SHIFT 0x04 #define PALMAS_GPIO_DATA_IN_GPIO_3_IN 0x08 -#define PALMAS_GPIO_DATA_IN_GPIO_3_IN_SHIFT 3 +#define PALMAS_GPIO_DATA_IN_GPIO_3_IN_SHIFT 0x03 #define PALMAS_GPIO_DATA_IN_GPIO_2_IN 0x04 -#define PALMAS_GPIO_DATA_IN_GPIO_2_IN_SHIFT 2 +#define PALMAS_GPIO_DATA_IN_GPIO_2_IN_SHIFT 0x02 #define PALMAS_GPIO_DATA_IN_GPIO_1_IN 0x02 -#define PALMAS_GPIO_DATA_IN_GPIO_1_IN_SHIFT 1 +#define PALMAS_GPIO_DATA_IN_GPIO_1_IN_SHIFT 0x01 #define PALMAS_GPIO_DATA_IN_GPIO_0_IN 0x01 -#define PALMAS_GPIO_DATA_IN_GPIO_0_IN_SHIFT 0 +#define PALMAS_GPIO_DATA_IN_GPIO_0_IN_SHIFT 0x00 /* Bit definitions for GPIO_DATA_DIR */ #define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR 0x80 -#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR_SHIFT 7 +#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR_SHIFT 0x07 #define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR 0x40 -#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR_SHIFT 6 +#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR_SHIFT 0x06 #define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR 0x20 -#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR_SHIFT 5 +#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR_SHIFT 0x05 #define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR 0x10 -#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR_SHIFT 4 +#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR_SHIFT 0x04 #define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR 0x08 -#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR_SHIFT 3 +#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR_SHIFT 0x03 #define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR 0x04 -#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR_SHIFT 2 +#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR_SHIFT 0x02 #define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR 0x02 -#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR_SHIFT 1 +#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR_SHIFT 0x01 #define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR 0x01 -#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR_SHIFT 0 +#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR_SHIFT 0x00 /* Bit definitions for GPIO_DATA_OUT */ #define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT 0x80 -#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT_SHIFT 7 +#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT_SHIFT 0x07 #define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT 0x40 -#define 
PALMAS_GPIO_DATA_OUT_GPIO_6_OUT_SHIFT 6 +#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT_SHIFT 0x06 #define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT 0x20 -#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT_SHIFT 5 +#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT_SHIFT 0x05 #define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT 0x10 -#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT_SHIFT 4 +#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT_SHIFT 0x04 #define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT 0x08 -#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT_SHIFT 3 +#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT_SHIFT 0x03 #define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT 0x04 -#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT_SHIFT 2 +#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT_SHIFT 0x02 #define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT 0x02 -#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT_SHIFT 1 +#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT_SHIFT 0x01 #define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT 0x01 -#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT_SHIFT 0 +#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT_SHIFT 0x00 /* Bit definitions for GPIO_DEBOUNCE_EN */ #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN 0x80 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN_SHIFT 7 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN_SHIFT 0x07 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN 0x40 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN_SHIFT 6 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN_SHIFT 0x06 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN 0x20 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN_SHIFT 5 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN_SHIFT 0x05 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN 0x10 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN_SHIFT 4 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN_SHIFT 0x04 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN 0x08 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN_SHIFT 3 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN_SHIFT 0x03 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN 0x04 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN_SHIFT 2 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN_SHIFT 0x02 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN 0x02 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN_SHIFT 1 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN_SHIFT 0x01 #define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN 0x01 -#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN_SHIFT 0 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN_SHIFT 0x00 /* Bit definitions for GPIO_CLEAR_DATA_OUT */ #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT 0x80 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT_SHIFT 7 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT_SHIFT 0x07 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT 0x40 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT_SHIFT 6 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT_SHIFT 0x06 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT 0x20 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT_SHIFT 5 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT_SHIFT 0x05 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT 0x10 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT_SHIFT 4 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT_SHIFT 0x04 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT 0x08 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT_SHIFT 3 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT_SHIFT 0x03 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT 0x04 -#define 
PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT_SHIFT 2 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT_SHIFT 0x02 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT 0x02 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT_SHIFT 1 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT_SHIFT 0x01 #define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT 0x01 -#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT_SHIFT 0 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT_SHIFT 0x00 /* Bit definitions for GPIO_SET_DATA_OUT */ #define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT 0x80 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT_SHIFT 7 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT_SHIFT 0x07 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT 0x40 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT_SHIFT 6 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT_SHIFT 0x06 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT 0x20 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT_SHIFT 5 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT_SHIFT 0x05 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT 0x10 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT_SHIFT 4 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT_SHIFT 0x04 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT 0x08 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT_SHIFT 3 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT_SHIFT 0x03 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT 0x04 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT_SHIFT 2 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT_SHIFT 0x02 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT 0x02 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT_SHIFT 1 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT_SHIFT 0x01 #define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT 0x01 -#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT_SHIFT 0 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT_SHIFT 0x00 /* Bit definitions for PU_PD_GPIO_CTRL1 */ #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD 0x40 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD_SHIFT 6 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD_SHIFT 0x06 #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU 0x20 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU_SHIFT 5 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU_SHIFT 0x05 #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD 0x10 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD_SHIFT 4 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD_SHIFT 0x04 #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU 0x08 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU_SHIFT 3 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU_SHIFT 0x03 #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD 0x04 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD_SHIFT 2 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD_SHIFT 0x02 #define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD 0x01 -#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD_SHIFT 0 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD_SHIFT 0x00 /* Bit definitions for PU_PD_GPIO_CTRL2 */ #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD 0x40 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD_SHIFT 6 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD_SHIFT 0x06 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU 0x20 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU_SHIFT 5 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU_SHIFT 0x05 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD 0x10 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD_SHIFT 4 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD_SHIFT 0x04 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU 0x08 -#define 
PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU_SHIFT 3 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU_SHIFT 0x03 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD 0x04 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD_SHIFT 2 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD_SHIFT 0x02 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU 0x02 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU_SHIFT 1 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU_SHIFT 0x01 #define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD 0x01 -#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD_SHIFT 0 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD_SHIFT 0x00 /* Bit definitions for OD_OUTPUT_GPIO_CTRL */ #define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD 0x20 -#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD_SHIFT 5 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD_SHIFT 0x05 #define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD 0x04 -#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD_SHIFT 2 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD_SHIFT 0x02 #define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD 0x02 -#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD_SHIFT 1 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD_SHIFT 0x01 /* Registers for function GPADC */ -#define PALMAS_GPADC_CTRL1 0x0 -#define PALMAS_GPADC_CTRL2 0x1 -#define PALMAS_GPADC_RT_CTRL 0x2 -#define PALMAS_GPADC_AUTO_CTRL 0x3 -#define PALMAS_GPADC_STATUS 0x4 -#define PALMAS_GPADC_RT_SELECT 0x5 -#define PALMAS_GPADC_RT_CONV0_LSB 0x6 -#define PALMAS_GPADC_RT_CONV0_MSB 0x7 -#define PALMAS_GPADC_AUTO_SELECT 0x8 -#define PALMAS_GPADC_AUTO_CONV0_LSB 0x9 -#define PALMAS_GPADC_AUTO_CONV0_MSB 0xA -#define PALMAS_GPADC_AUTO_CONV1_LSB 0xB -#define PALMAS_GPADC_AUTO_CONV1_MSB 0xC -#define PALMAS_GPADC_SW_SELECT 0xD -#define PALMAS_GPADC_SW_CONV0_LSB 0xE -#define PALMAS_GPADC_SW_CONV0_MSB 0xF +#define PALMAS_GPADC_CTRL1 0x00 +#define PALMAS_GPADC_CTRL2 0x01 +#define PALMAS_GPADC_RT_CTRL 0x02 +#define PALMAS_GPADC_AUTO_CTRL 0x03 +#define PALMAS_GPADC_STATUS 0x04 +#define PALMAS_GPADC_RT_SELECT 0x05 +#define PALMAS_GPADC_RT_CONV0_LSB 0x06 +#define PALMAS_GPADC_RT_CONV0_MSB 0x07 +#define PALMAS_GPADC_AUTO_SELECT 0x08 +#define PALMAS_GPADC_AUTO_CONV0_LSB 0x09 +#define PALMAS_GPADC_AUTO_CONV0_MSB 0x0A +#define PALMAS_GPADC_AUTO_CONV1_LSB 0x0B +#define PALMAS_GPADC_AUTO_CONV1_MSB 0x0C +#define PALMAS_GPADC_SW_SELECT 0x0D +#define PALMAS_GPADC_SW_CONV0_LSB 0x0E +#define PALMAS_GPADC_SW_CONV0_MSB 0x0F #define PALMAS_GPADC_THRES_CONV0_LSB 0x10 #define PALMAS_GPADC_THRES_CONV0_MSB 0x11 #define PALMAS_GPADC_THRES_CONV1_LSB 0x12 @@ -2731,150 +2731,150 @@ enum usb_irq_events { /* Bit definitions for GPADC_CTRL1 */ #define PALMAS_GPADC_CTRL1_RESERVED_MASK 0xc0 -#define PALMAS_GPADC_CTRL1_RESERVED_SHIFT 6 +#define PALMAS_GPADC_CTRL1_RESERVED_SHIFT 0x06 #define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK 0x30 -#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT 4 +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT 0x04 #define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK 0x0c -#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT 2 +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT 0x02 #define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET 0x02 -#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET_SHIFT 1 +#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET_SHIFT 0x01 #define PALMAS_GPADC_CTRL1_GPADC_FORCE 0x01 -#define PALMAS_GPADC_CTRL1_GPADC_FORCE_SHIFT 0 +#define PALMAS_GPADC_CTRL1_GPADC_FORCE_SHIFT 0x00 /* Bit definitions for GPADC_CTRL2 */ #define PALMAS_GPADC_CTRL2_RESERVED_MASK 0x06 -#define PALMAS_GPADC_CTRL2_RESERVED_SHIFT 1 +#define PALMAS_GPADC_CTRL2_RESERVED_SHIFT 0x01 /* Bit definitions for GPADC_RT_CTRL */ #define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY 
0x02 -#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT 1 +#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT 0x01 #define PALMAS_GPADC_RT_CTRL_START_POLARITY 0x01 -#define PALMAS_GPADC_RT_CTRL_START_POLARITY_SHIFT 0 +#define PALMAS_GPADC_RT_CTRL_START_POLARITY_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_CTRL */ #define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 0x80 -#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1_SHIFT 7 +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1_SHIFT 0x07 #define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0 0x40 -#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0_SHIFT 6 +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0_SHIFT 0x06 #define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN 0x20 -#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN_SHIFT 5 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN_SHIFT 0x05 #define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN 0x10 -#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN_SHIFT 4 -#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK 0x0f -#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_SHIFT 0 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN_SHIFT 0x04 +#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK 0x0F +#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_SHIFT 0x00 /* Bit definitions for GPADC_STATUS */ #define PALMAS_GPADC_STATUS_GPADC_AVAILABLE 0x10 -#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE_SHIFT 4 +#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE_SHIFT 0x04 /* Bit definitions for GPADC_RT_SELECT */ #define PALMAS_GPADC_RT_SELECT_RT_CONV_EN 0x80 -#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN_SHIFT 7 -#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_MASK 0x0f -#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_SHIFT 0 +#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN_SHIFT 0x07 +#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_SHIFT 0x00 /* Bit definitions for GPADC_RT_CONV0_LSB */ -#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_MASK 0xff -#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_SHIFT 0 +#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_SHIFT 0x00 /* Bit definitions for GPADC_RT_CONV0_MSB */ -#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK 0x0f -#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT 0 +#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_SELECT */ -#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_MASK 0xf0 -#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_SHIFT 4 -#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_MASK 0x0f -#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_SHIFT 0 +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_MASK 0xF0 +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_SHIFT 0x04 +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_CONV0_LSB */ -#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_MASK 0xff -#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_SHIFT 0 +#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_CONV0_MSB */ -#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_MASK 0x0f -#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_SHIFT 0 +#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_CONV1_LSB */ -#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_MASK 0xff 
-#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_SHIFT 0 +#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_MASK 0xFF +#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_SHIFT 0x00 /* Bit definitions for GPADC_AUTO_CONV1_MSB */ -#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_MASK 0x0f -#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_SHIFT 0 +#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_MASK 0x0F +#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_SHIFT 0x00 /* Bit definitions for GPADC_SW_SELECT */ #define PALMAS_GPADC_SW_SELECT_SW_CONV_EN 0x80 -#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN_SHIFT 7 +#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN_SHIFT 0x07 #define PALMAS_GPADC_SW_SELECT_SW_START_CONV0 0x10 -#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0_SHIFT 4 -#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK 0x0f -#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_SHIFT 0 +#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0_SHIFT 0x04 +#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_SHIFT 0x00 /* Bit definitions for GPADC_SW_CONV0_LSB */ -#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK 0xff -#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_SHIFT 0 +#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_SHIFT 0x00 /* Bit definitions for GPADC_SW_CONV0_MSB */ -#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK 0x0f -#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_SHIFT 0 +#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_SHIFT 0x00 /* Bit definitions for GPADC_THRES_CONV0_LSB */ -#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_MASK 0xff -#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_SHIFT 0 +#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_SHIFT 0x00 /* Bit definitions for GPADC_THRES_CONV0_MSB */ #define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL 0x80 -#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL_SHIFT 7 -#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_MASK 0x0f -#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_SHIFT 0 +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL_SHIFT 0x07 +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_SHIFT 0x00 /* Bit definitions for GPADC_THRES_CONV1_LSB */ -#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_MASK 0xff -#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_SHIFT 0 +#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_MASK 0xFF +#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_SHIFT 0x00 /* Bit definitions for GPADC_THRES_CONV1_MSB */ #define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL 0x80 -#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL_SHIFT 7 -#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_MASK 0x0f -#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_SHIFT 0 +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL_SHIFT 0x07 +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_MASK 0x0F +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_SHIFT 0x00 /* Bit definitions for GPADC_SMPS_ILMONITOR_EN */ #define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN 0x20 -#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN_SHIFT 5 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN_SHIFT 0x05 #define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT 0x10 -#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT_SHIFT 4 
-#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_MASK 0x0f -#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_SHIFT 0 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT_SHIFT 0x04 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_MASK 0x0F +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_SHIFT 0x00 /* Bit definitions for GPADC_SMPS_VSEL_MONITORING */ #define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE 0x80 -#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE_SHIFT 7 -#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_MASK 0x7f -#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_SHIFT 0 +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE_SHIFT 0x07 +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_MASK 0x7F +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_SHIFT 0x00 /* Registers for function GPADC */ -#define PALMAS_GPADC_TRIM1 0x0 -#define PALMAS_GPADC_TRIM2 0x1 -#define PALMAS_GPADC_TRIM3 0x2 -#define PALMAS_GPADC_TRIM4 0x3 -#define PALMAS_GPADC_TRIM5 0x4 -#define PALMAS_GPADC_TRIM6 0x5 -#define PALMAS_GPADC_TRIM7 0x6 -#define PALMAS_GPADC_TRIM8 0x7 -#define PALMAS_GPADC_TRIM9 0x8 -#define PALMAS_GPADC_TRIM10 0x9 -#define PALMAS_GPADC_TRIM11 0xA -#define PALMAS_GPADC_TRIM12 0xB -#define PALMAS_GPADC_TRIM13 0xC -#define PALMAS_GPADC_TRIM14 0xD -#define PALMAS_GPADC_TRIM15 0xE -#define PALMAS_GPADC_TRIM16 0xF +#define PALMAS_GPADC_TRIM1 0x00 +#define PALMAS_GPADC_TRIM2 0x01 +#define PALMAS_GPADC_TRIM3 0x02 +#define PALMAS_GPADC_TRIM4 0x03 +#define PALMAS_GPADC_TRIM5 0x04 +#define PALMAS_GPADC_TRIM6 0x05 +#define PALMAS_GPADC_TRIM7 0x06 +#define PALMAS_GPADC_TRIM8 0x07 +#define PALMAS_GPADC_TRIM9 0x08 +#define PALMAS_GPADC_TRIM10 0x09 +#define PALMAS_GPADC_TRIM11 0x0A +#define PALMAS_GPADC_TRIM12 0x0B +#define PALMAS_GPADC_TRIM13 0x0C +#define PALMAS_GPADC_TRIM14 0x0D +#define PALMAS_GPADC_TRIM15 0x0E +#define PALMAS_GPADC_TRIM16 0x0F static inline int palmas_read(struct palmas *palmas, unsigned int base, unsigned int reg, unsigned int *val) { - unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); int slave_id = PALMAS_BASE_TO_SLAVE(base); return regmap_read(palmas->regmap[slave_id], addr, val); -- cgit v1.2.3 From 368c96640d10a145da5f258f2d2833668d4f3629 Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Mon, 2 Jun 2014 17:19:52 -0700 Subject: HID: core: add two new usages for digitizer On Feb 17, 2014, two new usages are approved to HID usage Table 18 - Digitizer Page: 5A Secondary Barrel Switch MC 16.4 5B Transducer Serial Number SV 16.3.1 This patch adds relevant definitions to hid/input. It also removes outdated comments in hid.h. 
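The two constants this patch adds follow the usual HID encoding: the upper 16 bits carry the usage page (0x000d, Digitizer) and the lower 16 bits the usage ID (0x5a and 0x5b). Below is a minimal userspace sketch of that decomposition, using the values from the hunks that follow; nothing in it is kernel API.

#include <stdio.h>
#include <stdint.h>

#define HID_DG_BARRELSWITCH2    0x000d005a  /* Secondary Barrel Switch */
#define HID_DG_TOOLSERIALNUMBER 0x000d005b  /* Transducer Serial Number */

static void decode(const char *name, uint32_t usage)
{
	/* upper 16 bits: usage page, lower 16 bits: usage ID */
	printf("%-24s page=0x%04x id=0x%04x\n", name, usage >> 16, usage & 0xffff);
}

int main(void)
{
	decode("HID_DG_BARRELSWITCH2", HID_DG_BARRELSWITCH2);
	decode("HID_DG_TOOLSERIALNUMBER", HID_DG_TOOLSERIALNUMBER);
	return 0;
}

As the hid-input hunk below shows, the first usage is mapped to BTN_STYLUS2 and the second sets MSC_SERIAL on the input device.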
Signed-off-by: Ping Cheng Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-debug.c | 2 ++ drivers/hid/hid-input.c | 5 +++++ include/linux/hid.h | 7 ++----- 3 files changed, 9 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c2537df10f47..84c3cb15ccdd 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -165,6 +165,8 @@ static const struct hid_usage_entry hid_usage_table[] = { {0, 0x53, "DeviceIndex"}, {0, 0x54, "ContactCount"}, {0, 0x55, "ContactMaximumNumber"}, + {0, 0x5A, "SecondaryBarrelSwitch"}, + {0, 0x5B, "TransducerSerialNumber"}, { 15, 0, "PhysicalInterfaceDevice" }, {0, 0x00, "Undefined"}, {0, 0x01, "Physical_Interface_Device"}, diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 9f2076acffb1..2619f7f4517a 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -684,9 +684,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; case 0x46: /* TabletPick */ + case 0x5a: /* SecondaryBarrelSwitch */ map_key_clear(BTN_STYLUS2); break; + case 0x5b: /* TransducerSerialNumber */ + set_bit(MSC_SERIAL, input->mscbit); + break; + default: goto unknown; } break; diff --git a/include/linux/hid.h b/include/linux/hid.h index 720e3a10608c..a468ec8cc4fe 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -233,11 +233,6 @@ struct hid_item { #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 -/* - * as of May 20, 2009 the usages below are not yet in the official USB spec - * but are being pushed by Microsft as described in their paper "Digitizer - * Drivers for Windows Touch and Pen-Based Computers" - */ #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -246,6 +241,8 @@ struct hid_item { #define HID_DG_DEVICEINDEX 0x000d0053 #define HID_DG_CONTACTCOUNT 0x000d0054 #define HID_DG_CONTACTMAX 0x000d0055 +#define HID_DG_BARRELSWITCH2 0x000d005a +#define HID_DG_TOOLSERIALNUMBER 0x000d005b /* * HID report types --- Ouch! HID spec says 1 2 3! 
-- cgit v1.2.3 From e6cdb0929fe6726ba5203fc5529b74564d98a9e9 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 3 Jun 2014 11:24:06 +0800 Subject: blk-mq: fix sparse warning on missed __percpu annotation 'struct blk_mq_ctx' is __percpu, so add the annotation and fix the sparse warning reported from Fengguang: [block:for-linus 2/3] block/blk-mq.h:75:16: sparse: incorrect type in initializer (different address spaces) Reported-by: kbuild test robot Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- include/linux/blkdev.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 43eb3156e110..3bb4cfec276b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1767,7 +1767,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; - struct blk_mq_ctx *ctx; + struct blk_mq_ctx __percpu *ctx; struct request_queue *q; unsigned int *map; int i; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8aba35f46f87..5c6f836afa1b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -335,7 +335,7 @@ struct request_queue { unsigned int *mq_map; /* sw queues */ - struct blk_mq_ctx *queue_ctx; + struct blk_mq_ctx __percpu *queue_ctx; unsigned int nr_queues; /* hw dispatch queues */ -- cgit v1.2.3 From 64c5c759084e153272eb05f4103de3e0adf5a88a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 4 Jun 2014 04:40:19 -0500 Subject: of/irq: provide more wrappers for !CONFIG_OF The pci-rcar driver is enabled for compile tests, and this has now shown that the driver cannot build without CONFIG_OF, following the inclusion of f8f2fe7355fb "PCI: rcar: Use new OF interrupt mapping when possible": drivers/built-in.o: In function `rcar_pci_map_irq': :(.text+0x1cc7c): undefined reference to `of_irq_parse_and_map_pci' pci/host/pcie-rcar.c: In function 'pci_dma_range_parser_init': pci/host/pcie-rcar.c:875:2: error: implicit declaration of function 'of_n_addr_cells' [-Werror=implicit-function-declaration] As pointed out by Ben Dooks and Geert Uytterhoeven, this is actually supposed to build fine, which we can achieve if we make the declaration of of_irq_parse_and_map_pci conditional on CONFIG_OF and provide an empty inline function otherwise, as we do for a lot of other of interfaces. This lets us build the rcar_pci driver again without CONFIG_OF for build testing. All platforms using this driver select OF, so this doesn't change anything for the users. 
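The fix follows a common kernel-header idiom: declare the real function only when the subsystem is enabled, and otherwise provide a static inline stub so drivers enabled for compile testing still build and link. Here is a standalone sketch of that idiom; my_feature_query and CONFIG_MY_FEATURE are made-up names used purely for illustration, not part of the patch.

#include <stdio.h>

/* Left undefined here so the stub branch below is what gets compiled;
 * defining it would require supplying the out-of-line implementation. */
/* #define CONFIG_MY_FEATURE 1 */

#ifdef CONFIG_MY_FEATURE
int my_feature_query(int id);    /* real implementation lives in a .c file */
#else
static inline int my_feature_query(int id)
{
	(void)id;
	return -1;    /* report "not available", as several real stubs do with -EINVAL */
}
#endif

int main(void)
{
	printf("query -> %d\n", my_feature_query(42));
	return 0;
}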
Signed-off-by: Arnd Bergmann Cc: devicetree@vger.kernel.org Cc: Rob Herring Cc: Grant Likely Cc: Lucas Stach Cc: Bjorn Helgaas Cc: Magnus Damm Cc: Geert Uytterhoeven Cc: Ben Dooks Cc: linux-pci@vger.kernel.org Cc: linux-sh@vger.kernel.org [robh: drop wrappers for of_n_addr_cells and of_n_size_cells which are low-level functions that should not be used for !OF] Signed-off-by: Rob Herring --- include/linux/of_pci.h | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 1a1f5ffd5288..dde3a4a0fa5d 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -6,14 +6,44 @@ struct pci_dev; struct of_phandle_args; -int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); -int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); - struct device_node; + +#ifdef CONFIG_OF +int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); int of_pci_get_devfn(struct device_node *np); +int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); +#else +static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) +{ + return 0; +} + +static inline struct device_node *of_pci_find_child_device(struct device_node *parent, + unsigned int devfn) +{ + return NULL; +} + +static inline int of_pci_get_devfn(struct device_node *np) +{ + return -EINVAL; +} + +static inline int +of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return 0; +} + +static inline int +of_pci_parse_bus_range(struct device_node *node, struct resource *res) +{ + return -EINVAL; +} +#endif #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) int of_pci_msi_chip_add(struct msi_chip *chip); -- cgit v1.2.3 From 0e62f51f8753b048f391ee2d7f2af1f7297b0be5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Jun 2014 10:23:49 -0600 Subject: blk-mq: let blk_mq_tag_to_rq() take blk_mq_tags as the main parameter We currently pass in the hardware queue, and get the tags from there. But from scsi-mq, with a shared tag space, it's a lot more convenient to pass in the blk_mq_tags instead as the hardware queue isn't always directly available. So instead of having to re-map to a given hardware queue from rq->mq_ctx, just pass in the tags structure. 
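The refactoring boils down to the observation that a tag is just an index into the tag set's request array, so the lookup needs only the tags structure, not the hardware queue that happens to own it. The following simplified stand-in (not the real blk-mq types) illustrates the shape of the new call.

#include <stdio.h>

struct request { int id; };

struct tags {
	struct request **rqs;    /* indexed by tag */
	unsigned int nr_tags;
};

static struct request *tag_to_rq(struct tags *tags, unsigned int tag)
{
	return tag < tags->nr_tags ? tags->rqs[tag] : NULL;
}

int main(void)
{
	struct request r = { .id = 7 };
	struct request *rqs[1] = { &r };
	struct tags t = { .rqs = rqs, .nr_tags = 1 };

	printf("tag 0 -> request %d\n", tag_to_rq(&t, 0)->id);
	return 0;
}

Any caller that holds the tag set, such as the mtip32xx hunk below, can then resolve a tag directly without first re-mapping to a hardware queue.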
Signed-off-by: Jens Axboe --- block/blk-mq.c | 19 ++++++++++++------- drivers/block/mtip32xx/mtip32xx.c | 4 +++- include/linux/blk-mq.h | 2 +- 3 files changed, 16 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 4e8e8cf00815..4e4cd6208052 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -529,15 +529,20 @@ void blk_mq_kick_requeue_list(struct request_queue *q) } EXPORT_SYMBOL(blk_mq_kick_requeue_list); -struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag) +static inline bool is_flush_request(struct request *rq, unsigned int tag) { - struct request_queue *q = hctx->queue; + return ((rq->cmd_flags & REQ_FLUSH_SEQ) && + rq->q->flush_rq->tag == tag); +} - if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && - q->flush_rq->tag == tag) - return q->flush_rq; +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +{ + struct request *rq = tags->rqs[tag]; + + if (!is_flush_request(rq, tag)) + return rq; - return hctx->tags->rqs[tag]; + return rq->q->flush_rq; } EXPORT_SYMBOL(blk_mq_tag_to_rq); @@ -566,7 +571,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) if (tag >= hctx->tags->nr_tags) break; - rq = blk_mq_tag_to_rq(hctx, tag++); + rq = blk_mq_tag_to_rq(hctx->tags, tag++); if (rq->q != hctx->queue) continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index abc858b3528b..74abd49fabdc 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -193,7 +193,9 @@ static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd) static struct request *mtip_rq_from_tag(struct driver_data *dd, unsigned int tag) { - return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag); + struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; + + return blk_mq_tag_to_rq(hctx->tags, tag); } static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c15128833100..0feedebfde48 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -155,7 +155,7 @@ void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); -struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag); +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -- cgit v1.2.3 From c177c81e09e517bbf75b67762cdab1b83aba6976 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 4 Jun 2014 16:05:35 -0700 Subject: hugetlb: restrict hugepage_migration_support() to x86_64 Currently hugepage migration is available for all archs which support pmd-level hugepage, but testing is done only for x86_64 and there're bugs for other archs. So to avoid breaking such archs, this patch limits the availability strictly to x86_64 until developers of other archs get interested in enabling this feature. Simply disabling hugepage migration on non-x86_64 archs is not enough to fix the reported problem where sys_move_pages() hits the BUG_ON() in follow_page(FOLL_GET), so let's fix this by checking if hugepage migration is supported in vma_migratable(). 
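The mechanism is a compile-time gate: only architectures that select CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION (x86_64 in this patch) report hugepage migration as supported, and vma_migratable() refuses hugetlb VMAs everywhere else. The helper below is a simplified, standalone stand-in for hugepage_migration_support(); the real version in the diff additionally checks the huge page shift.

#include <stdio.h>

/* Selected only by x86_64 in the patch below; left undefined elsewhere. */
/* #define CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION 1 */

static inline int hugepage_migration_supported(void)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return 1;
#else
	return 0;    /* other archs: hugetlb VMAs treated as unmigratable */
#endif
}

int main(void)
{
	printf("hugepage migration supported: %d\n", hugepage_migration_supported());
	return 0;
}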
Signed-off-by: Naoya Horiguchi Reported-by: Michael Ellerman Tested-by: Michael Ellerman Acked-by: Hugh Dickins Cc: Benjamin Herrenschmidt Cc: Tony Luck Cc: Russell King Cc: Martin Schwidefsky Cc: James Hogan Cc: Ralf Baechle Cc: David Miller Cc: [3.12+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/hugetlbpage.c | 5 ----- arch/arm64/mm/hugetlbpage.c | 5 ----- arch/ia64/mm/hugetlbpage.c | 5 ----- arch/metag/mm/hugetlbpage.c | 5 ----- arch/mips/mm/hugetlbpage.c | 5 ----- arch/powerpc/mm/hugetlbpage.c | 10 ---------- arch/s390/mm/hugetlbpage.c | 5 ----- arch/sh/mm/hugetlbpage.c | 5 ----- arch/sparc/mm/hugetlbpage.c | 5 ----- arch/tile/mm/hugetlbpage.c | 5 ----- arch/x86/Kconfig | 4 ++++ arch/x86/mm/hugetlbpage.c | 10 ---------- include/linux/hugetlb.h | 13 +++++-------- include/linux/mempolicy.h | 6 ++++++ mm/Kconfig | 3 +++ 15 files changed, 18 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c index 54ee6163c181..66781bf34077 100644 --- a/arch/arm/mm/hugetlbpage.c +++ b/arch/arm/mm/hugetlbpage.c @@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd) { return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); } - -int pmd_huge_support(void) -{ - return 1; -} diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 31eb959e9aa8..023747bf4dd7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -58,11 +58,6 @@ int pud_huge(pud_t pud) #endif } -int pmd_huge_support(void) -{ - return 1; -} - static __init int setup_hugepagesz(char *opt) { unsigned long ps = memparse(opt, &opt); diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 68232db98baa..76069c18ee42 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -114,11 +114,6 @@ int pud_huge(pud_t pud) return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c index 042431509b56..3c52fa6d0f8e 100644 --- a/arch/metag/mm/hugetlbpage.c +++ b/arch/metag/mm/hugetlbpage.c @@ -110,11 +110,6 @@ int pud_huge(pud_t pud) return 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index 77e0ae036e7c..4ec8ee10d371 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -84,11 +84,6 @@ int pud_huge(pud_t pud) return (pud_val(pud) & _PAGE_HUGE) != 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index eb923654ba80..7e70ae968e5f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd) */ return ((pgd_val(pgd) & 0x3) != 0x0); } - -int pmd_huge_support(void) -{ - return 1; -} #else int pmd_huge(pmd_t pmd) { @@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd) { return 0; } - -int pmd_huge_support(void) -{ - return 0; -} #endif pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 0727a55d87d9..0ff66a7e29bb 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -220,11 +220,6 @@ int pud_huge(pud_t pud) 
return 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int write) { diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 0d676a41081e..d7762349ea48 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -83,11 +83,6 @@ int pud_huge(pud_t pud) return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 9bd9ce80bf77..d329537739c6 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -231,11 +231,6 @@ int pud_huge(pud_t pud) return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 0cb3bbaa580c..e514899e1100 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -166,11 +166,6 @@ int pud_huge(pud_t pud) return !!(pud_val(pud) & _PAGE_HUGE_PAGE); } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7d5feb5908dd..e41b258ad040 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1873,6 +1873,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y depends on X86_64 || X86_PAE +config ARCH_ENABLE_HUGEPAGE_MIGRATION + def_bool y + depends on X86_64 && HUGETLB_PAGE && MIGRATION + menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 8c9f647ff9e1..8b977ebf9388 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, { return NULL; } - -int pmd_huge_support(void) -{ - return 0; -} #else struct page * @@ -80,11 +75,6 @@ int pud_huge(pud_t pud) { return !!(pud_val(pud) & _PAGE_PSE); } - -int pmd_huge_support(void) -{ - return 1; -} #endif #ifdef CONFIG_HUGETLB_PAGE diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index b65166de1d9d..d0bad1a8b0bd 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page) extern void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); -int pmd_huge_support(void); -/* - * Currently hugepage migration is enabled only for pmd-based hugepage. - * This function will be updated when hugepage migration is more widely - * supported. 
- */ static inline int hugepage_migration_support(struct hstate *h) { - return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + return huge_page_shift(h) == PMD_SHIFT; +#else + return 0; +#endif } static inline spinlock_t *huge_pte_lockptr(struct hstate *h, @@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page) return page->index; } #define dissolve_free_huge_pages(s, e) do {} while (0) -#define pmd_huge_support() 0 #define hugepage_migration_support(h) 0 static inline spinlock_t *huge_pte_lockptr(struct hstate *h, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3c1b968da0ca..f230a978e6ba 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) return 0; + +#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + if (vma->vm_flags & VM_HUGETLB) + return 0; +#endif + /* * Migration allocates pages in the highest zone. If we cannot * do so then migration (at least from node to node) is not diff --git a/mm/Kconfig b/mm/Kconfig index 28cec518f4d4..75ac479cbacd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -267,6 +267,9 @@ config MIGRATION pages as migration can relocate pages to satisfy a huge page allocation instead of reclaiming. +config ARCH_ENABLE_HUGEPAGE_MIGRATION + boolean + config PHYS_ADDR_T_64BIT def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT -- cgit v1.2.3 From ac13a829f6adb674015ab399594c089990104af7 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Wed, 4 Jun 2014 16:06:27 -0700 Subject: fs/libfs.c: add generic data flush to fsync Description by Jan Kara: "A lot of older filesystems don't properly flush volatile disk caches on fsync(2) which can lead to loss of fsynced data after power failure. This patch makes generic_file_fsync() issue proper cache flush to fix the problem. Sysadmin can use /sys/devices/.../cache_type to tell the system it should not send the cache flush." [akpm@linux-foundation.org: nuke ifdef] [akpm@linux-foundation.org: fix warning] Signed-off-by: Fabian Frederick Suggested-by: Jan Kara Suggested-by: Christoph Hellwig Cc: Jan Kara Cc: Christoph Hellwig Cc: Alexander Viro Cc: "Theodore Ts'o" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/libfs.c | 34 +++++++++++++++++++++++++++++++--- include/linux/blkdev.h | 9 +++++++++ include/linux/fs.h | 1 + 3 files changed, 41 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/libfs.c b/fs/libfs.c index a1844244246f..88e3e00e2eca 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -3,6 +3,7 @@ * Library for filesystems writers. */ +#include #include #include #include @@ -923,16 +924,19 @@ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, EXPORT_SYMBOL_GPL(generic_fh_to_parent); /** - * generic_file_fsync - generic fsync implementation for simple filesystems + * __generic_file_fsync - generic fsync implementation for simple filesystems + * * @file: file to synchronize + * @start: start offset in bytes + * @end: end offset in bytes (inclusive) * @datasync: only synchronize essential metadata if true * * This is a generic implementation of the fsync method for simple * filesystems which track all non-inode metadata in the buffers list * hanging off the address_space structure. 
*/ -int generic_file_fsync(struct file *file, loff_t start, loff_t end, - int datasync) +int __generic_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { struct inode *inode = file->f_mapping->host; int err; @@ -952,10 +956,34 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end, err = sync_inode_metadata(inode, 1); if (ret == 0) ret = err; + out: mutex_unlock(&inode->i_mutex); return ret; } +EXPORT_SYMBOL(__generic_file_fsync); + +/** + * generic_file_fsync - generic fsync implementation for simple filesystems + * with flush + * @file: file to synchronize + * @start: start offset in bytes + * @end: end offset in bytes (inclusive) + * @datasync: only synchronize essential metadata if true + * + */ + +int generic_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + struct inode *inode = file->f_mapping->host; + int err; + + err = __generic_file_fsync(file, start, end, datasync); + if (err) + return err; + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); +} EXPORT_SYMBOL(generic_file_fsync); /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8aba35f46f87..45cf6e537c83 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1607,6 +1607,9 @@ struct block_device_operations { extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long); #else /* CONFIG_BLOCK */ + +struct block_device; + /* * stubs for when the block layer is configured out */ @@ -1642,6 +1645,12 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) return false; } +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector) +{ + return 0; +} + #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 878031227c57..c3f46e499dd0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2590,6 +2590,7 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count); +extern int __generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); -- cgit v1.2.3 From c46a7c817e662a820373bb76b88d0ad67d6abe5d Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:06:30 -0700 Subject: x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels _PAGE_NUMA is currently an alias of _PROT_PROTNONE to trap NUMA hinting faults on x86. Care is taken such that _PAGE_NUMA is used only in situations where the VMA flags distinguish between NUMA hinting faults and prot_none faults. This decision was x86-specific and conceptually it is difficult requiring special casing to distinguish between PROTNONE and NUMA ptes based on context. Fundamentally, we only need the _PAGE_NUMA bit to tell the difference between an entry that is really unmapped and a page that is protected for NUMA hinting faults as if the PTE is not present then a fault will be trapped. Swap PTEs on x86-64 use the bits after _PAGE_GLOBAL for the offset. This patch shrinks the maximum possible swap size and uses the bit to uniquely distinguish between NUMA hinting ptes and swap ptes. 
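The 16TB-to-8TB note in the last paragraph is simple arithmetic: stealing one bit from the swap offset field halves the encodable swap space. With 4 KiB pages, 32 offset bits cover 16 TiB and 31 bits cover 8 TiB; the 32-bit figure is used here only for illustration and is not read out of pgtable_64.h.

#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 4096;    /* 4 KiB pages */
	int bits;

	for (bits = 32; bits >= 31; bits--)
		printf("%d offset bits -> %llu TiB of swap\n",
		       bits, ((1ULL << bits) * page_size) >> 40);
	return 0;
}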
Signed-off-by: Mel Gorman Cc: David Vrabel Cc: Ingo Molnar Cc: Peter Anvin Cc: Fengguang Wu Cc: Linus Torvalds Cc: Steven Noonan Cc: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: Dave Hansen Cc: Srikar Dronamraju Cc: Cyrill Gorcunov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pgtable.h | 6 ++++ arch/x86/include/asm/pgtable.h | 15 +++++--- arch/x86/include/asm/pgtable_64.h | 8 +++++ arch/x86/include/asm/pgtable_types.h | 66 +++++++++++++++++++----------------- arch/x86/mm/pageattr-test.c | 2 +- include/asm-generic/pgtable.h | 8 +++-- include/linux/swapops.h | 2 +- mm/memory.c | 17 ++++------ 8 files changed, 75 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 3ebb188c3ff5..d98c1ecc3266 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -44,6 +44,12 @@ static inline int pte_present(pte_t pte) return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); } +#define pte_present_nonuma pte_present_nonuma +static inline int pte_present_nonuma(pte_t pte) +{ + return pte_val(pte) & (_PAGE_PRESENT); +} + #define pte_numa pte_numa static inline int pte_numa(pte_t pte) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index b459ddf27d64..66276c1d23bb 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -131,7 +131,8 @@ static inline int pte_exec(pte_t pte) static inline int pte_special(pte_t pte) { - return pte_flags(pte) & _PAGE_SPECIAL; + return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) == + (_PAGE_PRESENT|_PAGE_SPECIAL); } static inline unsigned long pte_pfn(pte_t pte) @@ -452,6 +453,12 @@ static inline int pte_present(pte_t a) _PAGE_NUMA); } +#define pte_present_nonuma pte_present_nonuma +static inline int pte_present_nonuma(pte_t a) +{ + return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); +} + #define pte_accessible pte_accessible static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { @@ -860,19 +867,19 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, static inline pte_t pte_swp_mksoft_dirty(pte_t pte) { - VM_BUG_ON(pte_present(pte)); + VM_BUG_ON(pte_present_nonuma(pte)); return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); } static inline int pte_swp_soft_dirty(pte_t pte) { - VM_BUG_ON(pte_present(pte)); + VM_BUG_ON(pte_present_nonuma(pte)); return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; } static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) { - VM_BUG_ON(pte_present(pte)); + VM_BUG_ON(pte_present_nonuma(pte)); return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); } diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e22c1dbf7feb..6d6ecd09883c 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -145,8 +145,16 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) +#ifdef CONFIG_NUMA_BALANCING +/* Automatic NUMA balancing needs to be distinguishable from swap entries */ +#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 2) +#else #define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) +#endif #else +#ifdef CONFIG_NUMA_BALANCING +#error Incompatible format for automatic NUMA balancing +#endif #define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1) #define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1) #endif 
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index eb3d44945133..f216963760e5 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -16,15 +16,26 @@ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ -#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ -#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ +#define _PAGE_BIT_SOFTW1 9 /* available for programmer */ +#define _PAGE_BIT_SOFTW2 10 /* " */ +#define _PAGE_BIT_SOFTW3 11 /* " */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ +#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */ +#define _PAGE_BIT_IOMAP _PAGE_BIT_SOFTW2 /* flag used to indicate IO mapping */ +#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ +#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ +/* + * Swap offsets on configurations that allow automatic NUMA balancing use the + * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from + * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the + * maximum possible swap space from 16TB to 8TB. + */ +#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1) + /* If _PAGE_BIT_PRESENT is clear, we use these: */ /* - if the user mapped it with PROT_NONE; pte_present gives true */ #define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL @@ -40,7 +51,7 @@ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) +#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1) #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) @@ -61,14 +72,27 @@ * they do not conflict with each other. */ -#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN - #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) #endif +/* + * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page + * that is not present. The hinting fault gathers numa placement statistics + * (see pte_numa()). The bit is always zero when the PTE is not present. + * + * The bit picked must be always zero when the pmd is present and not + * present, so that we don't lose information when we set it while + * atomically clearing the present bit. + */ +#ifdef CONFIG_NUMA_BALANCING +#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA) +#else +#define _PAGE_NUMA (_AT(pteval_t, 0)) +#endif + /* * Tracking soft dirty bit when a page goes to a swap is tricky. 
* We need a bit which can be stored in pte _and_ not conflict @@ -94,26 +118,6 @@ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) -/* - * _PAGE_NUMA indicates that this page will trigger a numa hinting - * minor page fault to gather numa placement statistics (see - * pte_numa()). The bit picked (8) is within the range between - * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't - * require changes to the swp entry format because that bit is always - * zero when the pte is not present. - * - * The bit picked must be always zero when the pmd is present and not - * present, so that we don't lose information when we set it while - * atomically clearing the present bit. - * - * Because we shared the same bit (8) with _PAGE_PROTNONE this can be - * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE - * couldn't reach, like handle_mm_fault() (see access_error in - * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for - * handle_mm_fault() to be invoked). - */ -#define _PAGE_NUMA _PAGE_PROTNONE - #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ @@ -122,8 +126,8 @@ /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ - _PAGE_SOFT_DIRTY) -#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) + _PAGE_SOFT_DIRTY | _PAGE_NUMA) +#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA) #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) #define _PAGE_CACHE_WB (0) diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index 461bc8289024..6629f397b467 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -35,7 +35,7 @@ enum { static int pte_testbit(pte_t pte) { - return pte_flags(pte) & _PAGE_UNUSED1; + return pte_flags(pte) & _PAGE_SOFTW1; } struct split_state { diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index a8015a7a55bb..53b2acc38213 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -233,6 +233,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) # define pte_accessible(mm, pte) ((void)(pte), 1) #endif +#ifndef pte_present_nonuma +#define pte_present_nonuma(pte) pte_present(pte) +#endif + #ifndef flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif @@ -670,7 +674,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd) static inline int pte_numa(pte_t pte) { return (pte_flags(pte) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; + (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; } #endif @@ -678,7 +682,7 @@ static inline int pte_numa(pte_t pte) static inline int pmd_numa(pmd_t pmd) { return (pmd_flags(pmd) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; + (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; } #endif diff --git a/include/linux/swapops.h b/include/linux/swapops.h index c0f75261a728..6adfb7bfbf44 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry) /* check whether a pte points to a swap entry */ static inline int is_swap_pte(pte_t pte) { - return !pte_none(pte) && !pte_present(pte) && !pte_file(pte); + return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte); } #endif diff --git 
a/mm/memory.c b/mm/memory.c index e302ae1dcce0..0897830011f3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -756,7 +756,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn = pte_pfn(pte); if (HAVE_PTE_SPECIAL) { - if (likely(!pte_special(pte))) + if (likely(!pte_special(pte) || pte_numa(pte))) goto check_pfn; if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; @@ -782,14 +782,15 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, } } - if (is_zero_pfn(pfn)) - return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } + if (is_zero_pfn(pfn)) + return NULL; + /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. @@ -1722,13 +1723,9 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* - * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault - * would be called on PROT_NONE ranges. We must never invoke - * handle_mm_fault on PROT_NONE ranges or the NUMA hinting - * page faults would unprotect the PROT_NONE ranges if - * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd - * bitflag. So to avoid that, don't set FOLL_NUMA if - * FOLL_FORCE is set. + * If FOLL_FORCE is set then do not force a full fault as the hinting + * fault information is unrelated to the reference behaviour of a task + * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; -- cgit v1.2.3 From 5dfb417509921eb90ee123a4d1525e8916b4ace4 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:06:38 -0700 Subject: sl[au]b: charge slabs to kmemcg explicitly We have only a few places where we actually want to charge kmem so instead of intruding into the general page allocation path with __GFP_KMEMCG it's better to explictly charge kmem there. All kmem charges will be easier to follow that way. This is a step towards removing __GFP_KMEMCG. It removes __GFP_KMEMCG from memcg caches' allocflags. Instead it makes slab allocation path call memcg_charge_kmem directly getting memcg to charge from the cache's memcg params. This also eliminates any possibility of misaccounting an allocation going from one memcg's cache to another memcg, because now we always charge slabs against the memcg the cache belongs to. That's why this patch removes the big comment to memcg_kmem_get_cache. 
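Conceptually the change moves accounting out of the allocator's flag handling and into the slab path itself: charge the owning memcg first, allocate, and roll the charge back if the allocation fails, which is the ordering visible in the kmem_getpages() and alloc_slab_page() hunks below. A userspace-flavoured sketch of that pattern, with stand-in names for memcg_charge_slab()/memcg_uncharge_slab():

#include <stdio.h>
#include <stdlib.h>

struct group { long charged; long limit; };

static int charge(struct group *g, long size)
{
	if (g->charged + size > g->limit)
		return -1;    /* over the group's limit: refuse */
	g->charged += size;
	return 0;
}

static void uncharge(struct group *g, long size)
{
	g->charged -= size;
}

static void *alloc_accounted(struct group *g, long size)
{
	void *p;

	if (charge(g, size))
		return NULL;
	p = malloc(size);
	if (!p)
		uncharge(g, size);    /* roll the charge back on allocation failure */
	return p;
}

int main(void)
{
	struct group g = { .charged = 0, .limit = 1 << 20 };
	void *p = alloc_accounted(&g, 4096);

	printf("alloc %s, charged=%ld bytes\n", p ? "ok" : "failed", g.charged);
	if (p) {
		free(p);
		uncharge(&g, 4096);
	}
	return 0;
}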
Signed-off-by: Vladimir Davydov Acked-by: Greg Thelen Cc: Johannes Weiner Acked-by: Michal Hocko Cc: Glauber Costa Cc: Christoph Lameter Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 15 ++++----------- mm/memcontrol.c | 4 ++-- mm/slab.c | 7 ++++++- mm/slab.h | 29 +++++++++++++++++++++++++++++ mm/slab_common.c | 6 +----- mm/slub.c | 24 +++++++++++++++++------- 6 files changed, 59 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b569b8be5c5a..96e5d2573eb0 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -506,6 +506,9 @@ void memcg_update_array_size(int num_groups); struct kmem_cache * __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); +int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size); +void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size); + void mem_cgroup_destroy_cache(struct kmem_cache *cachep); int __kmem_cache_destroy_memcg_children(struct kmem_cache *s); @@ -583,17 +586,7 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) * @cachep: the original global kmem cache * @gfp: allocation flags. * - * This function assumes that the task allocating, which determines the memcg - * in the page allocator, belongs to the same cgroup throughout the whole - * process. Misacounting can happen if the task calls memcg_kmem_get_cache() - * while belonging to a cgroup, and later on changes. This is considered - * acceptable, and should only happen upon task migration. - * - * Before the cache is created by the memcg core, there is also a possible - * imbalance: the task belongs to a memcg, but the cache being allocated from - * is the global cache, since the child cache is not yet guaranteed to be - * ready. This case is also fine, since in this case the GFP_KMEMCG will not be - * passed and the page allocator will not attempt any cgroup accounting. + * All memory allocated from a per-memcg cache is charged to the owner memcg. 
*/ static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5177c6d4a2dd..56a768b3d5a8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2953,7 +2953,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) } #endif -static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) +int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) { struct res_counter *fail_res; int ret = 0; @@ -2991,7 +2991,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) return ret; } -static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) +void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) { res_counter_uncharge(&memcg->res, size); if (do_swap_account) diff --git a/mm/slab.c b/mm/slab.c index 5c846d25c17d..944ac58cfcf8 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1688,8 +1688,12 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, if (cachep->flags & SLAB_RECLAIM_ACCOUNT) flags |= __GFP_RECLAIMABLE; + if (memcg_charge_slab(cachep, flags, cachep->gfporder)) + return NULL; + page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); if (!page) { + memcg_uncharge_slab(cachep, cachep->gfporder); slab_out_of_memory(cachep, flags, nodeid); return NULL; } @@ -1747,7 +1751,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page) memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; - __free_memcg_kmem_pages(page, cachep->gfporder); + __free_pages(page, cachep->gfporder); + memcg_uncharge_slab(cachep, cachep->gfporder); } static void kmem_rcu_free(struct rcu_head *head) diff --git a/mm/slab.h b/mm/slab.h index 6bd4c353704f..863e67b8c8c9 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -192,6 +192,26 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s; return s->memcg_params->root_cache; } + +static __always_inline int memcg_charge_slab(struct kmem_cache *s, + gfp_t gfp, int order) +{ + if (!memcg_kmem_enabled()) + return 0; + if (is_root_cache(s)) + return 0; + return memcg_charge_kmem(s->memcg_params->memcg, gfp, + PAGE_SIZE << order); +} + +static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) +{ + if (!memcg_kmem_enabled()) + return; + if (is_root_cache(s)) + return; + memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order); +} #else static inline bool is_root_cache(struct kmem_cache *s) { @@ -227,6 +247,15 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) { return s; } + +static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order) +{ + return 0; +} + +static inline void memcg_uncharge_slab(struct kmem_cache *s, int order) +{ +} #endif static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) diff --git a/mm/slab_common.c b/mm/slab_common.c index 102cc6fca3d3..06f0c6125632 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -290,12 +290,8 @@ void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_c root_cache->size, root_cache->align, root_cache->flags, root_cache->ctor, memcg, root_cache); - if (IS_ERR(s)) { + if (IS_ERR(s)) kfree(cache_name); - goto out_unlock; - } - - s->allocflags |= __GFP_KMEMCG; out_unlock: mutex_unlock(&slab_mutex); diff --git a/mm/slub.c b/mm/slub.c index d05a5483106d..fc9831851be6 100644 --- a/mm/slub.c +++ b/mm/slub.c 
@@ -1312,17 +1312,26 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) /* * Slab allocation and freeing */ -static inline struct page *alloc_slab_page(gfp_t flags, int node, - struct kmem_cache_order_objects oo) +static inline struct page *alloc_slab_page(struct kmem_cache *s, + gfp_t flags, int node, struct kmem_cache_order_objects oo) { + struct page *page; int order = oo_order(oo); flags |= __GFP_NOTRACK; + if (memcg_charge_slab(s, flags, order)) + return NULL; + if (node == NUMA_NO_NODE) - return alloc_pages(flags, order); + page = alloc_pages(flags, order); else - return alloc_pages_exact_node(node, flags, order); + page = alloc_pages_exact_node(node, flags, order); + + if (!page) + memcg_uncharge_slab(s, order); + + return page; } static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -1344,7 +1353,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) */ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; - page = alloc_slab_page(alloc_gfp, node, oo); + page = alloc_slab_page(s, alloc_gfp, node, oo); if (unlikely(!page)) { oo = s->min; alloc_gfp = flags; @@ -1352,7 +1361,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) * Allocation may have failed due to fragmentation. * Try a lower order alloc if possible */ - page = alloc_slab_page(alloc_gfp, node, oo); + page = alloc_slab_page(s, alloc_gfp, node, oo); if (page) stat(s, ORDER_FALLBACK); @@ -1468,7 +1477,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page) page_mapcount_reset(page); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; - __free_memcg_kmem_pages(page, order); + __free_pages(page, order); + memcg_uncharge_slab(s, order); } #define need_reserve_slab_rcu \ -- cgit v1.2.3 From 52383431b37cdbec63944e953ffc2698a7ad9722 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:06:39 -0700 Subject: mm: get rid of __GFP_KMEMCG Currently to allocate a page that should be charged to kmemcg (e.g. threadinfo), we pass __GFP_KMEMCG flag to the page allocator. The page allocated is then to be freed by free_memcg_kmem_pages. Apart from looking asymmetrical, this also requires intrusion to the general allocation path. So let's introduce separate functions that will alloc/free pages charged to kmemcg. The new functions are called alloc_kmem_pages and free_kmem_pages. They should be used when the caller actually would like to use kmalloc, but has to fall back to the page allocator for the allocation is large. They only differ from alloc_pages and free_pages in that besides allocating or freeing pages they also charge them to the kmem resource counter of the current memory cgroup. 
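The new entry points pair allocation with accounting, so the generic page allocator no longer needs to recognize a special GFP flag at all. The sketch below mirrors that pairing; the names echo alloc_kmem_pages()/free_kmem_pages(), but the bodies are simplified illustrations, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

static long kmem_charged;    /* stand-in for the memcg kmem counter */

static void *my_alloc_kmem(size_t size)
{
	void *p = malloc(size);

	if (p)
		kmem_charged += size;    /* charge only on success */
	return p;
}

static void my_free_kmem(void *p, size_t size)
{
	if (!p)
		return;
	kmem_charged -= size;    /* uncharge and free together */
	free(p);
}

int main(void)
{
	void *ti = my_alloc_kmem(16384);    /* e.g. a thread_info-sized block */

	printf("charged=%ld bytes\n", kmem_charged);
	my_free_kmem(ti, 16384);
	printf("charged=%ld bytes\n", kmem_charged);
	return 0;
}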
[sfr@canb.auug.org.au: export kmalloc_order() to modules] Signed-off-by: Vladimir Davydov Acked-by: Greg Thelen Cc: Johannes Weiner Acked-by: Michal Hocko Cc: Glauber Costa Cc: Christoph Lameter Cc: Pekka Enberg Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 10 +++++--- include/linux/memcontrol.h | 2 +- include/linux/slab.h | 11 +------- include/linux/thread_info.h | 2 -- include/trace/events/gfpflags.h | 1 - kernel/fork.c | 6 ++--- mm/memcontrol.c | 11 ++++---- mm/page_alloc.c | 56 +++++++++++++++++++++++++---------------- mm/slab_common.c | 13 ++++++++++ mm/slub.c | 6 ++--- 10 files changed, 68 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 39b81dc7d01a..d382db71e300 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -31,7 +31,6 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_RECLAIMABLE 0x80000u -#define ___GFP_KMEMCG 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_NO_KSWAPD 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -91,7 +90,6 @@ struct vm_area_struct; #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ -#define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */ #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ /* @@ -353,6 +351,10 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node) +extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); +extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, + unsigned int order); + extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -372,8 +374,8 @@ extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_cold_page(struct page *page, int cold); extern void free_hot_cold_page_list(struct list_head *list, int cold); -extern void __free_memcg_kmem_pages(struct page *page, unsigned int order); -extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order); +extern void __free_kmem_pages(struct page *page, unsigned int order); +extern void free_kmem_pages(unsigned long addr, unsigned int order); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 96e5d2573eb0..5155d09e749d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -537,7 +537,7 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * res_counter_charge_nofail, but we hope those allocations are rare, * and won't be worth the trouble. 
*/ - if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL)) + if (gfp & __GFP_NOFAIL) return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; diff --git a/include/linux/slab.h b/include/linux/slab.h index 307bfbe62387..a6aab2c0dfc5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -369,16 +369,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, #include #endif -static __always_inline void * -kmalloc_order(size_t size, gfp_t flags, unsigned int order) -{ - void *ret; - - flags |= (__GFP_COMP | __GFP_KMEMCG); - ret = (void *) __get_free_pages(flags, order); - kmemleak_alloc(ret, size, 1, flags); - return ret; -} +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); #ifdef CONFIG_TRACING extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index cb0cec94fda3..ff307b548ed3 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -61,8 +61,6 @@ extern long do_no_restart_syscall(struct restart_block *parm); # define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) #endif -#define THREADINFO_GFP_ACCOUNTED (THREADINFO_GFP | __GFP_KMEMCG) - /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index 1eddbf1557f2..d6fd8e5b14b7 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h @@ -34,7 +34,6 @@ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_KMEMCG, "GFP_KMEMCG"}, \ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ diff --git a/kernel/fork.c b/kernel/fork.c index 54a8d26f612f..59e3dcc5b8f2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -150,15 +150,15 @@ void __weak arch_release_thread_info(struct thread_info *ti) static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { - struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED, - THREAD_SIZE_ORDER); + struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, + THREAD_SIZE_ORDER); return page ? page_address(page) : NULL; } static inline void free_thread_info(struct thread_info *ti) { - free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); + free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); } # else static struct kmem_cache *thread_info_cache; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 56a768b3d5a8..7bab1de50f48 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3540,11 +3540,12 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) /* * Disabling accounting is only relevant for some specific memcg * internal allocations. Therefore we would initially not have such - * check here, since direct calls to the page allocator that are marked - * with GFP_KMEMCG only happen outside memcg core. We are mostly - * concerned with cache allocations, and by having this test at - * memcg_kmem_get_cache, we are already able to relay the allocation to - * the root cache and bypass the memcg cache altogether. + * check here, since direct calls to the page allocator that are + * accounted to kmemcg (alloc_kmem_pages and friends) only happen + * outside memcg core. 
We are mostly concerned with cache allocations, + * and by having this test at memcg_kmem_get_cache, we are already able + * to relay the allocation to the root cache and bypass the memcg cache + * altogether. * * There is one exception, though: the SLUB allocator does not create * large order caches, but rather service large kmallocs directly from diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5dba2933c9c0..7cfdcd808f52 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2697,7 +2697,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int migratetype = allocflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; - struct mem_cgroup *memcg = NULL; gfp_mask &= gfp_allowed_mask; @@ -2716,13 +2715,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!zonelist->_zonerefs->zone)) return NULL; - /* - * Will only have any effect when __GFP_KMEMCG is set. This is - * verified in the (always inline) callee - */ - if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) - return NULL; - retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); @@ -2782,8 +2774,6 @@ out: if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; - memcg_kmem_commit_charge(page, memcg, order); - return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); @@ -2837,27 +2827,51 @@ void free_pages(unsigned long addr, unsigned int order) EXPORT_SYMBOL(free_pages); /* - * __free_memcg_kmem_pages and free_memcg_kmem_pages will free - * pages allocated with __GFP_KMEMCG. + * alloc_kmem_pages charges newly allocated pages to the kmem resource counter + * of the current memory cgroup. * - * Those pages are accounted to a particular memcg, embedded in the - * corresponding page_cgroup. To avoid adding a hit in the allocator to search - * for that information only to find out that it is NULL for users who have no - * interest in that whatsoever, we provide these functions. - * - * The caller knows better which flags it relies on. + * It should be used when the caller would like to use kmalloc, but since the + * allocation is large, it has to fall back to the page allocator. + */ +struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) +{ + struct page *page; + struct mem_cgroup *memcg = NULL; + + if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) + return NULL; + page = alloc_pages(gfp_mask, order); + memcg_kmem_commit_charge(page, memcg, order); + return page; +} + +struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) +{ + struct page *page; + struct mem_cgroup *memcg = NULL; + + if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) + return NULL; + page = alloc_pages_node(nid, gfp_mask, order); + memcg_kmem_commit_charge(page, memcg, order); + return page; +} + +/* + * __free_kmem_pages and free_kmem_pages will free pages allocated with + * alloc_kmem_pages. 
*/ -void __free_memcg_kmem_pages(struct page *page, unsigned int order) +void __free_kmem_pages(struct page *page, unsigned int order) { memcg_kmem_uncharge_pages(page, order); __free_pages(page, order); } -void free_memcg_kmem_pages(unsigned long addr, unsigned int order) +void free_kmem_pages(unsigned long addr, unsigned int order) { if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); - __free_memcg_kmem_pages(virt_to_page((void *)addr), order); + __free_kmem_pages(virt_to_page((void *)addr), order); } } diff --git a/mm/slab_common.c b/mm/slab_common.c index 06f0c6125632..1950c8f4d1a6 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -582,6 +582,19 @@ void __init create_kmalloc_caches(unsigned long flags) } #endif /* !CONFIG_SLOB */ +void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) +{ + void *ret; + struct page *page; + + flags |= __GFP_COMP; + page = alloc_kmem_pages(flags, order); + ret = page ? page_address(page) : NULL; + kmemleak_alloc(ret, size, 1, flags); + return ret; +} +EXPORT_SYMBOL(kmalloc_order); + #ifdef CONFIG_TRACING void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) { diff --git a/mm/slub.c b/mm/slub.c index fc9831851be6..ddb60795f373 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3311,8 +3311,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) struct page *page; void *ptr = NULL; - flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG; - page = alloc_pages_node(node, flags, get_order(size)); + flags |= __GFP_COMP | __GFP_NOTRACK; + page = alloc_kmem_pages_node(node, flags, get_order(size)); if (page) ptr = page_address(page); @@ -3381,7 +3381,7 @@ void kfree(const void *x) if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kfree_hook(x); - __free_memcg_kmem_pages(page, compound_order(page)); + __free_kmem_pages(page, compound_order(page)); return; } slab_free(page->slab_cache, page, object, _RET_IP_); -- cgit v1.2.3 From 4f115147ff802267d0aa41e361c5aa5bd933d896 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 4 Jun 2014 16:06:46 -0700 Subject: mm,vmacache: add debug data Introduce a CONFIG_DEBUG_VM_VMACACHE option to enable counting the cache hit rate -- exported in /proc/vmstat. Any updates to the caching scheme needs this kind of data, thus it can save some work re-implementing the counting all the time. 
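As a usage note, the hit rate can be computed directly from the two counters this patch exports. The following userspace sketch is not part of the patch; it only assumes a kernel built with CONFIG_DEBUG_VM_VMACACHE so that the counters appear in /proc/vmstat:

/* Compute the VMA-cache hit rate from /proc/vmstat (illustrative only). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long val, calls = 0, hits = 0;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "vmacache_find_calls"))
			calls = val;
		else if (!strcmp(name, "vmacache_find_hits"))
			hits = val;
	}
	fclose(f);
	if (calls)
		printf("vmacache hit rate: %.2f%% (%llu/%llu)\n",
		       100.0 * hits / calls, hits, calls);
	else
		printf("vmacache counters not available\n");
	return 0;
}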
Signed-off-by: Davidlohr Bueso Cc: Aswin Chandramouleeswaran Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 4 ++++ include/linux/vmstat.h | 6 ++++++ lib/Kconfig.debug | 10 ++++++++++ mm/vmacache.c | 12 ++++++++++-- mm/vmstat.c | 4 ++++ 5 files changed, 34 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 486c3972c0be..ced92345c963 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -80,6 +80,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_TLB_LOCAL_FLUSH_ALL, NR_TLB_LOCAL_FLUSH_ONE, #endif /* CONFIG_DEBUG_TLBFLUSH */ +#ifdef CONFIG_DEBUG_VM_VMACACHE + VMACACHE_FIND_CALLS, + VMACACHE_FIND_HITS, +#endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 45c9cd1daf7a..82e7db7f7100 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -95,6 +95,12 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) #endif +#ifdef CONFIG_DEBUG_VM_VMACACHE +#define count_vm_vmacache_event(x) count_vm_event(x) +#else +#define count_vm_vmacache_event(x) do {} while (0) +#endif + #define __count_zone_vm_events(item, zone, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ zone_idx(zone), delta) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 99c8bfee1b00..c2de65045a40 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -501,6 +501,16 @@ config DEBUG_VM If unsure, say N. +config DEBUG_VM_VMACACHE + bool "Debug VMA caching" + depends on DEBUG_VM + help + Enable this to turn on VMA caching debug information. Doing so + can cause significant overhead, so only enable it in non-production + environments. + + If unsure, say N. 
+ config DEBUG_VM_RB bool "Debug VM red-black trees" depends on DEBUG_VM diff --git a/mm/vmacache.c b/mm/vmacache.c index 1037a3bab505..658ed3b3e38d 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -78,6 +78,8 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) if (!vmacache_valid(mm)) return NULL; + count_vm_vmacache_event(VMACACHE_FIND_CALLS); + for (i = 0; i < VMACACHE_SIZE; i++) { struct vm_area_struct *vma = current->vmacache[i]; @@ -85,8 +87,10 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) continue; if (WARN_ON_ONCE(vma->vm_mm != mm)) break; - if (vma->vm_start <= addr && vma->vm_end > addr) + if (vma->vm_start <= addr && vma->vm_end > addr) { + count_vm_vmacache_event(VMACACHE_FIND_HITS); return vma; + } } return NULL; @@ -102,11 +106,15 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, if (!vmacache_valid(mm)) return NULL; + count_vm_vmacache_event(VMACACHE_FIND_CALLS); + for (i = 0; i < VMACACHE_SIZE; i++) { struct vm_area_struct *vma = current->vmacache[i]; - if (vma && vma->vm_start == start && vma->vm_end == end) + if (vma && vma->vm_start == start && vma->vm_end == end) { + count_vm_vmacache_event(VMACACHE_FIND_HITS); return vma; + } } return NULL; diff --git a/mm/vmstat.c b/mm/vmstat.c index 302dd076b8bf..82ce17ce58c4 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -866,6 +866,10 @@ const char * const vmstat_text[] = { "nr_tlb_local_flush_one", #endif /* CONFIG_DEBUG_TLBFLUSH */ +#ifdef CONFIG_DEBUG_VM_VMACACHE + "vmacache_find_calls", + "vmacache_find_hits", +#endif #endif /* CONFIG_VM_EVENTS_COUNTERS */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ -- cgit v1.2.3 From 9c5a3621427da68afe6a078cadf807d2c8cc1d12 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 4 Jun 2014 16:06:50 -0700 Subject: x86: enable DMA CMA with swiotlb The DMA Contiguous Memory Allocator support on x86 is disabled when swiotlb config option is enabled. So DMA CMA is always disabled on x86_64 because swiotlb is always enabled. This attempts to support for DMA CMA with enabling swiotlb config option. The contiguous memory allocator on x86 is integrated in the function dma_generic_alloc_coherent() which is .alloc callback in nommu_dma_ops for dma_alloc_coherent(). x86_swiotlb_alloc_coherent() which is .alloc callback in swiotlb_dma_ops tries to allocate with dma_generic_alloc_coherent() firstly and then swiotlb_alloc_coherent() is called as a fallback. The main part of supporting DMA CMA with swiotlb is that changing x86_swiotlb_free_coherent() which is .free callback in swiotlb_dma_ops for dma_free_coherent() so that it can distinguish memory allocated by dma_generic_alloc_coherent() from one allocated by swiotlb_alloc_coherent() and release it with dma_generic_free_coherent() which can handle contiguous memory. This change requires making is_swiotlb_buffer() global function. This also needs to change .free callback in the dma_map_ops for amd_gart and sta2x11, because these dma_ops are also using dma_generic_alloc_coherent(). Signed-off-by: Akinobu Mita Acked-by: Marek Szyprowski Acked-by: Konrad Rzeszutek Wilk Cc: David Woodhouse Cc: Don Dutile Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Andi Kleen Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 2 +- arch/x86/include/asm/swiotlb.h | 7 +++++++ arch/x86/kernel/amd_gart_64.c | 2 +- arch/x86/kernel/pci-swiotlb.c | 9 ++++++--- arch/x86/pci/sta2x11-fixup.c | 6 ++---- include/linux/swiotlb.h | 2 ++ lib/swiotlb.c | 2 +- 7 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 896a411a4584..4a0137f6f032 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -41,7 +41,7 @@ config X86 select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_FRAME_POINTERS select HAVE_DMA_ATTRS - select HAVE_DMA_CONTIGUOUS if !SWIOTLB + select HAVE_DMA_CONTIGUOUS select HAVE_KRETPROBES select GENERIC_EARLY_IOREMAP select HAVE_OPTPROBES diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 977f1761a25d..ab05d73e2bb7 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -29,4 +29,11 @@ static inline void pci_swiotlb_late_init(void) static inline void dma_mark_clean(void *addr, size_t size) {} +extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs); +extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs); + #endif /* _ASM_X86_SWIOTLB_H */ diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index b574b295a2f9..8e3842fc8bea 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -512,7 +512,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); - free_pages((unsigned long)vaddr, get_order(size)); + dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); } static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6c483ba98b9c..77dd0ad58be4 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -14,7 +14,7 @@ #include int swiotlb __read_mostly; -static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, +void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { @@ -28,11 +28,14 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); } -static void x86_swiotlb_free_coherent(struct device *dev, size_t size, +void x86_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) { - swiotlb_free_coherent(dev, size, vaddr, dma_addr); + if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) + swiotlb_free_coherent(dev, size, vaddr, dma_addr); + else + dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); } static struct dma_map_ops swiotlb_dma_ops = { diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 9d8a509c9730..5ceda85b8687 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -173,9 +173,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, { void *vaddr; - vaddr = dma_generic_alloc_coherent(dev, size, dma_handle, flags, attrs); - if (!vaddr) - vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, flags); + 
vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, flags, attrs); *dma_handle = p2a(*dma_handle, to_pci_dev(dev)); return vaddr; } @@ -183,7 +181,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, /* We have our own dma_ops: the same as swiotlb but from alloc (above) */ static struct dma_map_ops sta2x11_dma_ops = { .alloc = sta2x11_swiotlb_alloc_coherent, - .free = swiotlb_free_coherent, + .free = x86_swiotlb_free_coherent, .map_page = swiotlb_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = swiotlb_map_sg_attrs, diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index a5ffd32642fd..e7a018eaf3a2 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -116,4 +116,6 @@ static inline void swiotlb_free(void) { } #endif extern void swiotlb_print_info(void); +extern int is_swiotlb_buffer(phys_addr_t paddr); + #endif /* __LINUX_SWIOTLB_H */ diff --git a/lib/swiotlb.c b/lib/swiotlb.c index b604b831f4d1..649d097853a1 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -374,7 +374,7 @@ void __init swiotlb_free(void) io_tlb_nslabs = 0; } -static int is_swiotlb_buffer(phys_addr_t paddr) +int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= io_tlb_start && paddr < io_tlb_end; } -- cgit v1.2.3 From 2bfc2862c4fe38379a2fb2cfba33fad32ccb4ff4 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 4 Jun 2014 16:06:53 -0700 Subject: memblock: introduce memblock_alloc_range() This introduces memblock_alloc_range() which allocates memblock from the specified range of physical address. I would like to use this function to specify the location of CMA. Signed-off-by: Akinobu Mita Cc: Marek Szyprowski Cc: Konrad Rzeszutek Wilk Cc: David Woodhouse Cc: Don Dutile Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Andi Kleen Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 2 ++ mm/memblock.c | 21 +++++++++++++++++---- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 73dc382e72d8..b660e05b63d4 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -272,6 +272,8 @@ static inline bool memblock_bottom_up(void) { return false; } #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 +phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end); phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr); phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, diff --git a/mm/memblock.c b/mm/memblock.c index a810ba923cdd..146736411318 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1033,22 +1033,35 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ -static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, - phys_addr_t align, phys_addr_t max_addr, - int nid) +static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t start, + phys_addr_t end, int nid) { phys_addr_t found; if (!align) align = SMP_CACHE_BYTES; - found = memblock_find_in_range_node(size, align, 0, max_addr, nid); + found = memblock_find_in_range_node(size, align, start, end, nid); if (found && !memblock_reserve(found, size)) return found; return 0; } +phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end) +{ 
+ return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE); +} + +static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t max_addr, + int nid) +{ + return memblock_alloc_range_nid(size, align, 0, max_addr, nid); +} + phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) { return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid); -- cgit v1.2.3 From 5ea3b1b2f8ad9162684431ce6188102ca4c64b7a Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 4 Jun 2014 16:06:54 -0700 Subject: cma: add placement specifier for "cma=" kernel parameter Currently, "cma=" kernel parameter is used to specify the size of CMA, but we can't specify where it is located. We want to locate CMA below 4GB for devices only supporting 32-bit addressing on 64-bit systems without iommu. This enables to specify the placement of CMA by extending "cma=" kernel parameter. Examples: 1. locate 64MB CMA below 4GB by "cma=64M@0-4G" 2. locate 64MB CMA exact at 512MB by "cma=64M@512M" Note that the DMA contiguous memory allocator on x86 assumes that page_address() works for the pages to allocate. So this change requires to limit end address of contiguous memory area upto max_pfn_mapped to prevent from locating it on highmem area by the argument of dma_contiguous_reserve(). Signed-off-by: Akinobu Mita Cc: Marek Szyprowski Cc: Konrad Rzeszutek Wilk Cc: David Woodhouse Cc: Don Dutile Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Andi Kleen Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 7 +++++-- arch/x86/kernel/setup.c | 2 +- drivers/base/dma-contiguous.c | 42 ++++++++++++++++++++++++++++--------- include/linux/dma-contiguous.h | 9 +++++--- 4 files changed, 44 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index af55e13ace8f..adea3a22fa00 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -630,8 +630,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Also note the kernel might malfunction if you disable some critical bits. - cma=nn[MG] [ARM,KNL] - Sets the size of kernel global memory area for contiguous + cma=nn[MG]@[start[MG][-end[MG]]] + [ARM,X86,KNL] + Sets the size of kernel global memory area for + contiguous memory allocations and optionally the + placement constraint by the physical address range of memory allocations. For more information, see include/linux/dma-contiguous.h diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 09c76d265550..78a0e6298922 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p) setup_real_mode(); memblock_set_current_limit(get_max_mapped()); - dma_contiguous_reserve(0); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * NOTE: On x86-32, only from this point on, fixmaps are ready for use. 
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index c34ec3364243..83969f8c5727 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -60,11 +60,22 @@ struct cma *dma_contiguous_default_area; */ static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; static phys_addr_t size_cmdline = -1; +static phys_addr_t base_cmdline; +static phys_addr_t limit_cmdline; static int __init early_cma(char *p) { pr_debug("%s(%s)\n", __func__, p); size_cmdline = memparse(p, &p); + if (*p != '@') + return 0; + base_cmdline = memparse(p + 1, &p); + if (*p != '-') { + limit_cmdline = base_cmdline + size_cmdline; + return 0; + } + limit_cmdline = memparse(p + 1, &p); + return 0; } early_param("cma", early_cma); @@ -108,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) void __init dma_contiguous_reserve(phys_addr_t limit) { phys_addr_t selected_size = 0; + phys_addr_t selected_base = 0; + phys_addr_t selected_limit = limit; + bool fixed = false; pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { selected_size = size_cmdline; + selected_base = base_cmdline; + selected_limit = min_not_zero(limit_cmdline, limit); + if (base_cmdline + size_cmdline == limit_cmdline) + fixed = true; } else { #ifdef CONFIG_CMA_SIZE_SEL_MBYTES selected_size = size_bytes; @@ -129,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit) pr_debug("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); - dma_contiguous_reserve_area(selected_size, 0, limit, - &dma_contiguous_default_area); + dma_contiguous_reserve_area(selected_size, selected_base, + selected_limit, + &dma_contiguous_default_area, + fixed); } -}; +} static DEFINE_MUTEX(cma_mutex); @@ -189,15 +209,20 @@ core_initcall(cma_init_reserved_areas); * @base: Base address of the reserved area optional, use 0 for any * @limit: End address of the reserved memory (optional, 0 for any). * @res_cma: Pointer to store the created cma region. + * @fixed: hint about where to place the reserved area * * This function reserves memory from early allocator. It should be * called by arch specific code once the early allocator (memblock or bootmem) * has been activated and all other subsystems have already allocated/reserved * memory. This function allows to create custom reserved areas for specific * devices. + * + * If @fixed is true, reserve contiguous area at exactly @base. If false, + * reserve in range from @base to @limit. */ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, - phys_addr_t limit, struct cma **res_cma) + phys_addr_t limit, struct cma **res_cma, + bool fixed) { struct cma *cma = &cma_areas[cma_area_count]; phys_addr_t alignment; @@ -223,18 +248,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, limit &= ~(alignment - 1); /* Reserve memory */ - if (base) { + if (base && fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { ret = -EBUSY; goto err; } } else { - /* - * Use __memblock_alloc_base() since - * memblock_alloc_base() panic()s. 
- */ - phys_addr_t addr = __memblock_alloc_base(size, alignment, limit); + phys_addr_t addr = memblock_alloc_range(size, alignment, base, + limit); if (!addr) { ret = -ENOMEM; goto err; diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 3b28f937d959..772eab5d524a 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -88,7 +88,8 @@ static inline void dma_contiguous_set_default(struct cma *cma) void dma_contiguous_reserve(phys_addr_t addr_limit); int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, - phys_addr_t limit, struct cma **res_cma); + phys_addr_t limit, struct cma **res_cma, + bool fixed); /** * dma_declare_contiguous() - reserve area for contiguous memory handling @@ -108,7 +109,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, { struct cma *cma; int ret; - ret = dma_contiguous_reserve_area(size, base, limit, &cma); + ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); if (ret == 0) dev_set_cma_area(dev, cma); @@ -136,7 +137,9 @@ static inline void dma_contiguous_set_default(struct cma *cma) { } static inline void dma_contiguous_reserve(phys_addr_t limit) { } static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, - phys_addr_t limit, struct cma **res_cma) { + phys_addr_t limit, struct cma **res_cma, + bool fixed) +{ return -ENOSYS; } -- cgit v1.2.3 From 02a8efeda894d3541c7143ed818b25b299504190 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 4 Jun 2014 16:06:59 -0700 Subject: include/linux/mmdebug.h: add VM_WARN_ON() and VM_WARN_ON_ONCE() WARN_ON() and WARN_ON_ONCE(), dependent on CONFIG_DEBUG_VM Cc: Sebastian Ott Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2d57efa64cc1..a3499d7b0e8a 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -11,9 +11,13 @@ extern void dump_page_badflags(struct page *page, const char *reason, #define VM_BUG_ON(cond) BUG_ON(cond) #define VM_BUG_ON_PAGE(cond, page) \ do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) +#define VM_WARN_ON(cond) WARN_ON(cond) +#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL -- cgit v1.2.3 From e4f674229ce63dac60be0c4ddfb5ef8d1225d30d Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 4 Jun 2014 16:07:02 -0700 Subject: mm: pass VM_BUG_ON() reason to dump_page() I recently added a patch to let folks pass a "reason" string dump_page() which gets dumped out along with the page's data. This essentially saves the bug-reader a trip in to the source to figure out why we BUG_ON()'d. The new VM_BUG_ON_PAGE() passes in NULL for "reason". It seems like we might as well pass the BUG_ON() condition if we have it. This will bloat kernels a bit with ~160 new strings, but this is all under a debugging option anyway. page:ffffea0008560280 count:1 mapcount:0 mapping:(null) index:0x0 page flags: 0xbfffc0000000001(locked) page dumped because: VM_BUG_ON_PAGE(PageLocked(page)) ------------[ cut here ]------------ kernel BUG at /home/davehans/linux.git/mm/filemap.c:464! 
invalid opcode: 0000 [#1] SMP CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.14.0+ #251 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 ... [akpm@linux-foundation.org: include stringify.h] Signed-off-by: Dave Hansen Acked-by: Kirill A. Shutemov Acked-by: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index a3499d7b0e8a..edd82a105220 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -1,6 +1,8 @@ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 +#include + struct page; extern void dump_page(struct page *page, const char *reason); @@ -9,8 +11,13 @@ extern void dump_page_badflags(struct page *page, const char *reason, #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) -#define VM_BUG_ON_PAGE(cond, page) \ - do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) +#define VM_BUG_ON_PAGE(cond, page) \ + do { \ + if (unlikely(cond)) { \ + dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #else -- cgit v1.2.3 From bae7f4ae14d47008a11b4358b167cb0ae186c06a Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Wed, 4 Jun 2014 16:07:08 -0700 Subject: hugetlb: add hstate_is_gigantic() Signed-off-by: Luiz Capitulino Reviewed-by: Andrea Arcangeli Reviewed-by: Naoya Horiguchi Reviewed-by: Yasuaki Ishimatsu Reviewed-by: Davidlohr Bueso Acked-by: Kirill A. Shutemov Reviewed-by: Zhang Yanfei Cc: David Rientjes Cc: Marcelo Tosatti Cc: Rik van Riel Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 5 +++++ mm/hugetlb.c | 28 ++++++++++++++-------------- 2 files changed, 19 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d0bad1a8b0bd..35786ee36f06 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -343,6 +343,11 @@ static inline unsigned huge_page_shift(struct hstate *h) return h->order + PAGE_SHIFT; } +static inline bool hstate_is_gigantic(struct hstate *h) +{ + return huge_page_order(h) >= MAX_ORDER; +} + static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1 << h->order; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5d54d4b8df01..a66310586894 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -611,7 +611,7 @@ static void update_and_free_page(struct hstate *h, struct page *page) { int i; - VM_BUG_ON(h->order >= MAX_ORDER); + VM_BUG_ON(hstate_is_gigantic(h)); h->nr_huge_pages--; h->nr_huge_pages_node[page_to_nid(page)]--; @@ -664,7 +664,7 @@ static void free_huge_page(struct page *page) if (restore_reserve) h->resv_huge_pages++; - if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) { + if (h->surplus_huge_pages_node[nid] && !hstate_is_gigantic(h)) { /* remove the page from active list */ list_del(&page->lru); update_and_free_page(h, page); @@ -768,7 +768,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return NULL; page = alloc_pages_exact_node(nid, @@ -962,7 +962,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) struct page *page; unsigned int r_nid; - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return 
NULL; /* @@ -1155,7 +1155,7 @@ static void return_unused_surplus_pages(struct hstate *h, h->resv_huge_pages -= unused_resv_pages; /* Cannot return gigantic pages currently */ - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return; nr_pages = min(unused_resv_pages, h->surplus_huge_pages); @@ -1355,7 +1355,7 @@ static void __init gather_bootmem_prealloc(void) * fix confusing memory reports from free(1) and another * side-effects, like CommitLimit going negative. */ - if (h->order > (MAX_ORDER - 1)) + if (hstate_is_gigantic(h)) adjust_managed_page_count(page, 1 << h->order); } } @@ -1365,7 +1365,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) unsigned long i; for (i = 0; i < h->max_huge_pages; ++i) { - if (h->order >= MAX_ORDER) { + if (hstate_is_gigantic(h)) { if (!alloc_bootmem_huge_page(h)) break; } else if (!alloc_fresh_huge_page(h, @@ -1381,7 +1381,7 @@ static void __init hugetlb_init_hstates(void) for_each_hstate(h) { /* oversize hugepages were init'ed in early boot */ - if (h->order < MAX_ORDER) + if (!hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); } } @@ -1415,7 +1415,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count, { int i; - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return; for_each_node_mask(i, *nodes_allowed) { @@ -1478,7 +1478,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, { unsigned long min_count, ret; - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return h->max_huge_pages; /* @@ -1605,7 +1605,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy, goto out; h = kobj_to_hstate(kobj, &nid); - if (h->order >= MAX_ORDER) { + if (hstate_is_gigantic(h)) { err = -EINVAL; goto out; } @@ -1688,7 +1688,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, unsigned long input; struct hstate *h = kobj_to_hstate(kobj, NULL); - if (h->order >= MAX_ORDER) + if (hstate_is_gigantic(h)) return -EINVAL; err = kstrtoul(buf, 10, &input); @@ -2112,7 +2112,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, tmp = h->max_huge_pages; - if (write && h->order >= MAX_ORDER) + if (write && hstate_is_gigantic(h)) return -EINVAL; table->data = &tmp; @@ -2168,7 +2168,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, tmp = h->nr_overcommit_huge_pages; - if (write && h->order >= MAX_ORDER) + if (write && hstate_is_gigantic(h)) return -EINVAL; table->data = &tmp; -- cgit v1.2.3 From 4f9b16a64753d0bb607454347036dc997fd03b82 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:07:14 -0700 Subject: mm: disable zone_reclaim_mode by default When it was introduced, zone_reclaim_mode made sense as NUMA distances punished and workloads were generally partitioned to fit into a NUMA node. NUMA machines are now common but few of the workloads are NUMA-aware and it's routine to see major performance degradation due to zone_reclaim_mode being enabled but relatively few can identify the problem. Those that require zone_reclaim_mode are likely to be able to detect when it needs to be enabled and tune appropriately so lets have a sensible default for the bulk of users. This patch (of 2): zone_reclaim_mode causes processes to prefer reclaiming memory from local node instead of spilling over to other nodes. This made sense initially when NUMA machines were almost exclusively HPC and the workload was partitioned into nodes. The NUMA penalties were sufficiently high to justify reclaiming the memory. 
On current machines and workloads it is often the case that zone_reclaim_mode destroys performance but not all users know how to detect this. Favour the common case and disable it by default. Users that are sophisticated enough to know they need zone_reclaim_mode will detect it. Signed-off-by: Mel Gorman Acked-by: Johannes Weiner Reviewed-by: Zhang Yanfei Acked-by: Michal Hocko Reviewed-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 17 +++++++++-------- arch/ia64/include/asm/topology.h | 3 ++- arch/powerpc/include/asm/topology.h | 8 ++------ include/linux/topology.h | 3 ++- mm/page_alloc.c | 2 -- 5 files changed, 15 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index dd9d0e33b443..5b6da0fb5fbf 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -772,16 +772,17 @@ This is value ORed together of 2 = Zone reclaim writes dirty pages out 4 = Zone reclaim swaps pages -zone_reclaim_mode is set during bootup to 1 if it is determined that pages -from remote zones will cause a measurable performance reduction. The -page allocator will then reclaim easily reusable pages (those page -cache pages that are currently not used) before allocating off node pages. - -It may be beneficial to switch off zone reclaim if the system is -used for a file server and all of memory should be used for caching files -from disk. In that case the caching effect is more important than +zone_reclaim_mode is disabled by default. For file servers or workloads +that benefit from having their data cached, zone_reclaim_mode should be +left disabled as the caching effect is likely to be more important than data locality. +zone_reclaim may be enabled if it's known that the workload is partitioned +such that each partition fits within a NUMA node and that accessing remote +memory would cause a measurable performance reduction. The page allocator +will then reclaim easily reusable pages (those page cache pages that are +currently not used) before allocating off node pages. + Allowing zone reclaim to write out pages stops processes that are writing large amounts of data from dirtying pages on other nodes. Zone reclaim will write out dirty pages if a zone fills up and so effectively diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 3202aa74e0d6..6437ca21f61b 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -21,7 +21,8 @@ #define PENALTY_FOR_NODE_WITH_CPUS 255 /* - * Distance above which we begin to use zone reclaim + * Nodes within this distance are eligible for reclaim by zone_reclaim() when + * zone_reclaim_mode is enabled. */ #define RECLAIM_DISTANCE 15 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index c9202151079f..6c8a8c5a37a1 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -9,12 +9,8 @@ struct device_node; #ifdef CONFIG_NUMA /* - * Before going off node we want the VM to try and reclaim from the local - * node. It does this if the remote distance is larger than RECLAIM_DISTANCE. - * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of - * 20, we never reclaim and go off node straight away. - * - * To fix this we choose a smaller value of RECLAIM_DISTANCE. 
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that + * all zones on all nodes will be eligible for zone_reclaim(). */ #define RECLAIM_DISTANCE 10 diff --git a/include/linux/topology.h b/include/linux/topology.h index 973671ff9e7d..dda6ee521e74 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -58,7 +58,8 @@ int arch_update_cpu_topology(void); /* * If the distance between nodes in a system is larger than RECLAIM_DISTANCE * (in whatever arch specific measurement units returned by node_distance()) - * then switch on zone reclaim on boot. + * and zone_reclaim_mode is enabled then the VM will only call zone_reclaim() + * on nodes within this distance. */ #define RECLAIM_DISTANCE 30 #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7cfdcd808f52..dfe954fbb48a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1860,8 +1860,6 @@ static void __paginginit init_zone_allows_reclaim(int nid) for_each_node_state(i, N_MEMORY) if (node_distance(nid, i) <= RECLAIM_DISTANCE) node_set(i, NODE_DATA(nid)->reclaim_nodes); - else - zone_reclaim_mode = 1; } #else /* CONFIG_NUMA */ -- cgit v1.2.3 From 5f7a75acdb24c7b9c436b3a0a66eec12e101d19c Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:07:15 -0700 Subject: mm: page_alloc: do not cache reclaim distances pgdat->reclaim_nodes tracks if a remote node is allowed to be reclaimed by zone_reclaim due to its distance. As it is expected that zone_reclaim_mode will be rarely enabled it is unreasonable for all machines to take a penalty. Fortunately, the zone_reclaim_mode() path is already slow and it is the path that takes the hit. Signed-off-by: Mel Gorman Acked-by: Johannes Weiner Reviewed-by: Zhang Yanfei Acked-by: Michal Hocko Reviewed-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 - mm/page_alloc.c | 17 ++--------------- 2 files changed, 2 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index fac5509c18f0..c1dbe0ba9f82 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -763,7 +763,6 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; - nodemask_t reclaim_nodes; /* Nodes allowed to reclaim from */ wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dfe954fbb48a..9f13bcfb6762 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1850,16 +1850,8 @@ static bool zone_local(struct zone *local_zone, struct zone *zone) static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) { - return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes); -} - -static void __paginginit init_zone_allows_reclaim(int nid) -{ - int i; - - for_each_node_state(i, N_MEMORY) - if (node_distance(nid, i) <= RECLAIM_DISTANCE) - node_set(i, NODE_DATA(nid)->reclaim_nodes); + return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < + RECLAIM_DISTANCE; } #else /* CONFIG_NUMA */ @@ -1893,9 +1885,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) return true; } -static inline void init_zone_allows_reclaim(int nid) -{ -} #endif /* CONFIG_NUMA */ /* @@ -4933,8 +4922,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, pgdat->node_id = nid; pgdat->node_start_pfn = 
node_start_pfn; - if (node_state(nid, N_MEMORY)) - init_zone_allows_reclaim(nid); #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); #endif -- cgit v1.2.3 From bfc8c90139ebd049b9801a951db3b9a4a00bed9c Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:07:18 -0700 Subject: mem-hotplug: implement get/put_online_mems kmem_cache_{create,destroy,shrink} need to get a stable value of cpu/node online mask, because they init/destroy/access per-cpu/node kmem_cache parts, which can be allocated or destroyed on cpu/mem hotplug. To protect against cpu hotplug, these functions use {get,put}_online_cpus. However, they do nothing to synchronize with memory hotplug - taking the slab_mutex does not eliminate the possibility of race as described in patch 2. What we need there is something like get_online_cpus, but for memory. We already have lock_memory_hotplug, which serves for the purpose, but it's a bit of a hammer right now, because it's backed by a mutex. As a result, it imposes some limitations to locking order, which are not desirable, and can't be used just like get_online_cpus. That's why in patch 1 I substitute it with get/put_online_mems, which work exactly like get/put_online_cpus except they block not cpu, but memory hotplug. [ v1 can be found at https://lkml.org/lkml/2014/4/6/68. I NAK'ed it by myself, because it used an rw semaphore for get/put_online_mems, making them dead lock prune. ] This patch (of 2): {un}lock_memory_hotplug, which is used to synchronize against memory hotplug, is currently backed by a mutex, which makes it a bit of a hammer - threads that only want to get a stable value of online nodes mask won't be able to proceed concurrently. Also, it imposes some strong locking ordering rules on it, which narrows down the set of its usage scenarios. This patch introduces get/put_online_mems, which are the same as get/put_online_cpus, but for memory hotplug, i.e. executing a code inside a get/put_online_mems section will guarantee a stable value of online nodes, present pages, etc. lock_memory_hotplug()/unlock_memory_hotplug() are removed altogether. Signed-off-by: Vladimir Davydov Cc: Christoph Lameter Cc: Pekka Enberg Cc: Tang Chen Cc: Zhang Yanfei Cc: Toshi Kani Cc: Xishi Qiu Cc: Jiang Liu Cc: Rafael J. Wysocki Cc: David Rientjes Cc: Wen Congyang Cc: Yasuaki Ishimatsu Cc: Lai Jiangshan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 14 ++-- include/linux/mmzone.h | 8 +-- mm/kmemleak.c | 4 +- mm/memory-failure.c | 8 +-- mm/memory_hotplug.c | 142 +++++++++++++++++++++++++++++------------ mm/slub.c | 4 +- mm/vmscan.c | 2 +- 7 files changed, 116 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 4ca3d951fe91..010d125bffbf 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -187,14 +187,8 @@ extern void put_page_bootmem(struct page *page); extern void get_page_bootmem(unsigned long ingo, struct page *page, unsigned long type); -/* - * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug - * notifier will be called under this. 2) offline/online/add/remove memory - * will not run simultaneously. - */ - -void lock_memory_hotplug(void); -void unlock_memory_hotplug(void); +void get_online_mems(void); +void put_online_mems(void); #else /* ! 
CONFIG_MEMORY_HOTPLUG */ /* @@ -232,8 +226,8 @@ static inline int try_online_node(int nid) return 0; } -static inline void lock_memory_hotplug(void) {} -static inline void unlock_memory_hotplug(void) {} +static inline void get_online_mems(void) {} +static inline void put_online_mems(void) {} #endif /* ! CONFIG_MEMORY_HOTPLUG */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c1dbe0ba9f82..ae693e1ad0f9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -481,9 +481,8 @@ struct zone { * give them a chance of being in the same cacheline. * * Write access to present_pages at runtime should be protected by - * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't - * tolerant drift of present_pages should hold memory hotplug lock to - * get a stable value. + * mem_hotplug_begin/end(). Any reader who can't tolerant drift of + * present_pages should get_online_mems() to get a stable value. * * Read access to managed_pages should be safe because it's unsigned * long. Write access to zone->managed_pages and totalram_pages are @@ -765,7 +764,8 @@ typedef struct pglist_data { int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; - struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ + struct task_struct *kswapd; /* Protected by + mem_hotplug_begin/end() */ int kswapd_max_order; enum zone_type classzone_idx; #ifdef CONFIG_NUMA_BALANCING diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 8d2fcdfeff7f..736ade31d1dc 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1300,7 +1300,7 @@ static void kmemleak_scan(void) /* * Struct page scanning for each node. */ - lock_memory_hotplug(); + get_online_mems(); for_each_online_node(i) { unsigned long start_pfn = node_start_pfn(i); unsigned long end_pfn = node_end_pfn(i); @@ -1318,7 +1318,7 @@ static void kmemleak_scan(void) scan_block(page, page + 1, NULL, 1); } } - unlock_memory_hotplug(); + put_online_mems(); /* * Scanning the task stacks (may introduce false negatives). diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9ccef39a9de2..6917f799412b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1664,11 +1664,7 @@ int soft_offline_page(struct page *page, int flags) } } - /* - * The lock_memory_hotplug prevents a race with memory hotplug. - * This is a big hammer, a better would be nicer. - */ - lock_memory_hotplug(); + get_online_mems(); /* * Isolate the page, so that it doesn't get reallocated if it @@ -1679,7 +1675,7 @@ int soft_offline_page(struct page *page, int flags) set_migratetype_isolate(page, true); ret = get_any_page(page, pfn, flags); - unlock_memory_hotplug(); + put_online_mems(); if (ret > 0) { /* for in-use pages */ if (PageHuge(page)) ret = soft_offline_huge_page(page, flags); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a650db29606f..2906873a1502 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -46,19 +46,84 @@ static void generic_online_page(struct page *page); static online_page_callback_t online_page_callback = generic_online_page; +static DEFINE_MUTEX(online_page_callback_lock); -DEFINE_MUTEX(mem_hotplug_mutex); +/* The same as the cpu_hotplug lock, but for memory hotplug. */ +static struct { + struct task_struct *active_writer; + struct mutex lock; /* Synchronizes accesses to refcount, */ + /* + * Also blocks the new readers during + * an ongoing mem hotplug operation. 
+ */ + int refcount; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} mem_hotplug = { + .active_writer = NULL, + .lock = __MUTEX_INITIALIZER(mem_hotplug.lock), + .refcount = 0, +#ifdef CONFIG_DEBUG_LOCK_ALLOC + .dep_map = {.name = "mem_hotplug.lock" }, +#endif +}; + +/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */ +#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map) +#define memhp_lock_acquire() lock_map_acquire(&mem_hotplug.dep_map) +#define memhp_lock_release() lock_map_release(&mem_hotplug.dep_map) + +void get_online_mems(void) +{ + might_sleep(); + if (mem_hotplug.active_writer == current) + return; + memhp_lock_acquire_read(); + mutex_lock(&mem_hotplug.lock); + mem_hotplug.refcount++; + mutex_unlock(&mem_hotplug.lock); + +} -void lock_memory_hotplug(void) +void put_online_mems(void) { - mutex_lock(&mem_hotplug_mutex); + if (mem_hotplug.active_writer == current) + return; + mutex_lock(&mem_hotplug.lock); + + if (WARN_ON(!mem_hotplug.refcount)) + mem_hotplug.refcount++; /* try to fix things up */ + + if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer)) + wake_up_process(mem_hotplug.active_writer); + mutex_unlock(&mem_hotplug.lock); + memhp_lock_release(); + } -void unlock_memory_hotplug(void) +static void mem_hotplug_begin(void) { - mutex_unlock(&mem_hotplug_mutex); + mem_hotplug.active_writer = current; + + memhp_lock_acquire(); + for (;;) { + mutex_lock(&mem_hotplug.lock); + if (likely(!mem_hotplug.refcount)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); + mutex_unlock(&mem_hotplug.lock); + schedule(); + } } +static void mem_hotplug_done(void) +{ + mem_hotplug.active_writer = NULL; + mutex_unlock(&mem_hotplug.lock); + memhp_lock_release(); +} /* add this memory to iomem resource */ static struct resource *register_memory_resource(u64 start, u64 size) @@ -727,14 +792,16 @@ int set_online_page_callback(online_page_callback_t callback) { int rc = -EINVAL; - lock_memory_hotplug(); + get_online_mems(); + mutex_lock(&online_page_callback_lock); if (online_page_callback == generic_online_page) { online_page_callback = callback; rc = 0; } - unlock_memory_hotplug(); + mutex_unlock(&online_page_callback_lock); + put_online_mems(); return rc; } @@ -744,14 +811,16 @@ int restore_online_page_callback(online_page_callback_t callback) { int rc = -EINVAL; - lock_memory_hotplug(); + get_online_mems(); + mutex_lock(&online_page_callback_lock); if (online_page_callback == callback) { online_page_callback = generic_online_page; rc = 0; } - unlock_memory_hotplug(); + mutex_unlock(&online_page_callback_lock); + put_online_mems(); return rc; } @@ -899,7 +968,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ int ret; struct memory_notify arg; - lock_memory_hotplug(); + mem_hotplug_begin(); /* * This doesn't need a lock to do pfn_to_page(). 
* The section can't be removed here because of the @@ -907,23 +976,18 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ */ zone = page_zone(pfn_to_page(pfn)); + ret = -EINVAL; if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && - !can_online_high_movable(zone)) { - unlock_memory_hotplug(); - return -EINVAL; - } + !can_online_high_movable(zone)) + goto out; if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { - if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) { - unlock_memory_hotplug(); - return -EINVAL; - } + if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) + goto out; } if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { - if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) { - unlock_memory_hotplug(); - return -EINVAL; - } + if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) + goto out; } /* Previous code may changed the zone of the pfn range */ @@ -939,8 +1003,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ ret = notifier_to_errno(ret); if (ret) { memory_notify(MEM_CANCEL_ONLINE, &arg); - unlock_memory_hotplug(); - return ret; + goto out; } /* * If this zone is not populated, then it is not in zonelist. @@ -964,8 +1027,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_ONLINE, &arg); - unlock_memory_hotplug(); - return ret; + goto out; } zone->present_pages += onlined_pages; @@ -995,9 +1057,9 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ if (onlined_pages) memory_notify(MEM_ONLINE, &arg); - unlock_memory_hotplug(); - - return 0; +out: + mem_hotplug_done(); + return ret; } #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ @@ -1055,7 +1117,7 @@ int try_online_node(int nid) if (node_online(nid)) return 0; - lock_memory_hotplug(); + mem_hotplug_begin(); pgdat = hotadd_new_pgdat(nid, 0); if (!pgdat) { pr_err("Cannot online node %d due to NULL pgdat\n", nid); @@ -1073,7 +1135,7 @@ int try_online_node(int nid) } out: - unlock_memory_hotplug(); + mem_hotplug_done(); return ret; } @@ -1117,7 +1179,7 @@ int __ref add_memory(int nid, u64 start, u64 size) new_pgdat = !p; } - lock_memory_hotplug(); + mem_hotplug_begin(); new_node = !node_online(nid); if (new_node) { @@ -1158,7 +1220,7 @@ error: release_memory_resource(res); out: - unlock_memory_hotplug(); + mem_hotplug_done(); return ret; } EXPORT_SYMBOL_GPL(add_memory); @@ -1565,7 +1627,7 @@ static int __ref __offline_pages(unsigned long start_pfn, if (!test_pages_in_a_zone(start_pfn, end_pfn)) return -EINVAL; - lock_memory_hotplug(); + mem_hotplug_begin(); zone = page_zone(pfn_to_page(start_pfn)); node = zone_to_nid(zone); @@ -1672,7 +1734,7 @@ repeat: writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); - unlock_memory_hotplug(); + mem_hotplug_done(); return 0; failed_removal: @@ -1684,7 +1746,7 @@ failed_removal: undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); out: - unlock_memory_hotplug(); + mem_hotplug_done(); return ret; } @@ -1888,7 +1950,7 @@ void __ref remove_memory(int nid, u64 start, u64 size) BUG_ON(check_hotplug_memory_range(start, size)); - lock_memory_hotplug(); + mem_hotplug_begin(); /* * All memory blocks must be offlined before removing memory. 
Check @@ -1897,10 +1959,8 @@ void __ref remove_memory(int nid, u64 start, u64 size) */ ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, check_memblock_offlined_cb); - if (ret) { - unlock_memory_hotplug(); + if (ret) BUG(); - } /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); @@ -1909,7 +1969,7 @@ void __ref remove_memory(int nid, u64 start, u64 size) try_offline_node(nid); - unlock_memory_hotplug(); + mem_hotplug_done(); } EXPORT_SYMBOL_GPL(remove_memory); #endif /* CONFIG_MEMORY_HOTREMOVE */ diff --git a/mm/slub.c b/mm/slub.c index ddb60795f373..9cb2501a2960 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4332,7 +4332,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, } } - lock_memory_hotplug(); + get_online_mems(); #ifdef CONFIG_SLUB_DEBUG if (flags & SO_ALL) { for_each_node_state(node, N_NORMAL_MEMORY) { @@ -4372,7 +4372,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, x += sprintf(buf + x, " N%d=%lu", node, nodes[node]); #endif - unlock_memory_hotplug(); + put_online_mems(); kfree(nodes); return x + sprintf(buf + x, "\n"); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 7901cb749e17..fbcf46076c4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3434,7 +3434,7 @@ int kswapd_run(int nid) /* * Called by memory hotplug when all memory in a node is offlined. Caller must - * hold lock_memory_hotplug(). + * hold mem_hotplug_begin/end(). */ void kswapd_stop(int nid) { -- cgit v1.2.3 From 2329d3751b082b4fd354f334a88662d72abac52d Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Wed, 4 Jun 2014 16:07:31 -0700 Subject: mm/swap.c: clean up *lru_cache_add* functions In mm/swap.c, __lru_cache_add() is exported, but actually there are no users outside this file. This patch unexports __lru_cache_add(), and makes it static. It also exports lru_cache_add_file(), as it is use by cifs and fuse, which can loaded as modules. 
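For illustration only (this is a schematic call site, not a hunk from this patch), a module such as cifs or fuse keeps using the same wrapper; what changes is what sits behind it:

	/* module call site, e.g. in a readpages path - unchanged */
	lru_cache_add_file(page);

	/*
	 * Before: lru_cache_add_file() was a static inline in swap.h expanding
	 * to ClearPageActive(page); __lru_cache_add(page);, which is why
	 * __lru_cache_add() had to be exported.
	 * After: it is an out-of-line exported function in mm/swap.c, and
	 * __lru_cache_add() can become static.
	 */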
Signed-off-by: Jianyu Zhan Cc: Minchan Kim Cc: Johannes Weiner Cc: Shaohua Li Cc: Bob Liu Cc: Seth Jennings Cc: Joonsoo Kim Cc: Rafael Aquini Cc: Mel Gorman Acked-by: Rik van Riel Cc: Andrea Arcangeli Cc: Khalid Aziz Cc: Christoph Hellwig Reviewed-by: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 19 ++----------------- mm/swap.c | 31 +++++++++++++++++++++++-------- 2 files changed, 25 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 350711560753..5a14b928164e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -308,8 +308,9 @@ extern unsigned long nr_free_pagecache_pages(void); /* linux/mm/swap.c */ -extern void __lru_cache_add(struct page *); extern void lru_cache_add(struct page *); +extern void lru_cache_add_anon(struct page *page); +extern void lru_cache_add_file(struct page *page); extern void lru_add_page_tail(struct page *page, struct page *page_tail, struct lruvec *lruvec, struct list_head *head); extern void activate_page(struct page *); @@ -323,22 +324,6 @@ extern void swap_setup(void); extern void add_page_to_unevictable_list(struct page *page); -/** - * lru_cache_add: add a page to the page lists - * @page: the page to add - */ -static inline void lru_cache_add_anon(struct page *page) -{ - ClearPageActive(page); - __lru_cache_add(page); -} - -static inline void lru_cache_add_file(struct page *page) -{ - ClearPageActive(page); - __lru_cache_add(page); -} - /* linux/mm/vmscan.c */ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); diff --git a/mm/swap.c b/mm/swap.c index 9ce43ba4498b..c0ed4d65438f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -582,13 +582,7 @@ void mark_page_accessed(struct page *page) } EXPORT_SYMBOL(mark_page_accessed); -/* - * Queue the page for addition to the LRU via pagevec. The decision on whether - * to add the page to the [in]active [file|anon] list is deferred until the - * pagevec is drained. This gives a chance for the caller of __lru_cache_add() - * have the page added to the active list using mark_page_accessed(). - */ -void __lru_cache_add(struct page *page) +static void __lru_cache_add(struct page *page) { struct pagevec *pvec = &get_cpu_var(lru_add_pvec); @@ -598,11 +592,32 @@ void __lru_cache_add(struct page *page) pagevec_add(pvec, page); put_cpu_var(lru_add_pvec); } -EXPORT_SYMBOL(__lru_cache_add); + +/** + * lru_cache_add: add a page to the page lists + * @page: the page to add + */ +void lru_cache_add_anon(struct page *page) +{ + ClearPageActive(page); + __lru_cache_add(page); +} + +void lru_cache_add_file(struct page *page) +{ + ClearPageActive(page); + __lru_cache_add(page); +} +EXPORT_SYMBOL(lru_cache_add_file); /** * lru_cache_add - add a page to a page list * @page: the page to be added to the LRU. + * + * Queue the page for addition to the LRU via pagevec. The decision on whether + * to add the page to the [in]active [file|anon] list is deferred until the + * pagevec is drained. This gives a chance for the caller of lru_cache_add() + * have the page added to the active list using mark_page_accessed(). */ void lru_cache_add(struct page *page) { -- cgit v1.2.3 From f98bafa06a28fdfdd5c49f820f4d6560f636fc46 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 4 Jun 2014 16:07:34 -0700 Subject: memcg: kill CONFIG_MM_OWNER CONFIG_MM_OWNER makes no sense. It is not user-selectable, it is only selected by CONFIG_MEMCG automatically. 
So we can kill this option in init/Kconfig and do s/CONFIG_MM_OWNER/CONFIG_MEMCG/ globally. Signed-off-by: Oleg Nesterov Acked-by: Michal Hocko Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 2 +- include/linux/sched.h | 4 ++-- init/Kconfig | 7 ------- kernel/exit.c | 4 ++-- kernel/fork.c | 4 ++-- 5 files changed, 7 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8967e20cbe57..de1627232af0 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -406,7 +406,7 @@ struct mm_struct { spinlock_t ioctx_lock; struct kioctx_table __rcu *ioctx_table; #endif -#ifdef CONFIG_MM_OWNER +#ifdef CONFIG_MEMCG /* * "owner" points to a task that is regarded as the canonical * user/owner of this mm. All of the following must be true in diff --git a/include/linux/sched.h b/include/linux/sched.h index 70f67e4e6156..2f2dd7d932a2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2967,7 +2967,7 @@ static inline void inc_syscw(struct task_struct *tsk) #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_MM_OWNER +#ifdef CONFIG_MEMCG extern void mm_update_next_owner(struct mm_struct *mm); extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); #else @@ -2978,7 +2978,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm) static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { } -#endif /* CONFIG_MM_OWNER */ +#endif /* CONFIG_MEMCG */ static inline unsigned long task_rlimit(const struct task_struct *tsk, unsigned int limit) diff --git a/init/Kconfig b/init/Kconfig index 4a1822a1a680..0a2f09a80e90 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -933,7 +933,6 @@ config RESOURCE_COUNTERS config MEMCG bool "Memory Resource Controller for Control Groups" depends on RESOURCE_COUNTERS - select MM_OWNER select EVENTFD help Provides a memory resource controller that manages both anonymous @@ -951,9 +950,6 @@ config MEMCG disable memory resource controller and you can avoid overheads. (and lose benefits of memory resource controller) - This config option also selects MM_OWNER config option, which - could in turn add some fork/exit overhead. - config MEMCG_SWAP bool "Memory Resource Controller Swap Extension" depends on MEMCG && SWAP @@ -1179,9 +1175,6 @@ config SCHED_AUTOGROUP desktop applications. Task group autogeneration is currently based upon task session. -config MM_OWNER - bool - config SYSFS_DEPRECATED bool "Enable deprecated sysfs features to support old userspace tools" depends on SYSFS diff --git a/kernel/exit.c b/kernel/exit.c index 6ed6a1d552b5..da1b838de8a6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -352,7 +352,7 @@ int disallow_signal(int sig) EXPORT_SYMBOL(disallow_signal); -#ifdef CONFIG_MM_OWNER +#ifdef CONFIG_MEMCG /* * A task is exiting. If it owned this mm, find a new owner for the mm. 
*/ @@ -434,7 +434,7 @@ assign_new_owner: task_unlock(c); put_task_struct(c); } -#endif /* CONFIG_MM_OWNER */ +#endif /* CONFIG_MEMCG */ /* * Turn us into a lazy TLB process if we diff --git a/kernel/fork.c b/kernel/fork.c index 59e3dcc5b8f2..0d53eb0dfb6f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1099,12 +1099,12 @@ static void rt_mutex_init_task(struct task_struct *p) #endif } -#ifdef CONFIG_MM_OWNER +#ifdef CONFIG_MEMCG void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { mm->owner = p; } -#endif /* CONFIG_MM_OWNER */ +#endif /* CONFIG_MEMCG */ /* * Initialize POSIX timer handling for a single task. -- cgit v1.2.3 From 1e32e77f95d60b121b6072e3e3a650a7f93068f9 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:07:37 -0700 Subject: memcg, slab: do not schedule cache destruction when last page goes away This patchset is a part of preparations for kmemcg re-parenting. It targets at simplifying kmemcg work-flows and synchronization. First, it removes async per memcg cache destruction (see patches 1, 2). Now caches are only destroyed on memcg offline. That means the caches that are not empty on memcg offline will be leaked. However, they are already leaked, because memcg_cache_params::nr_pages normally never drops to 0 so the destruction work is never scheduled except kmem_cache_shrink is called explicitly. In the future I'm planning reaping such dead caches on vmpressure or periodically. Second, it substitutes per memcg slab_caches_mutex's with the global memcg_slab_mutex, which should be taken during the whole per memcg cache creation/destruction path before the slab_mutex (see patch 3). This greatly simplifies synchronization among various per memcg cache creation/destruction paths. I'm still not quite sure about the end picture, in particular I don't know whether we should reap dead memcgs' kmem caches periodically or try to merge them with their parents (see https://lkml.org/lkml/2014/4/20/38 for more details), but whichever way we choose, this set looks like a reasonable change to me, because it greatly simplifies kmemcg work-flows and eases further development. This patch (of 3): After a memcg is offlined, we mark its kmem caches that cannot be deleted right now due to pending objects as dead by setting the memcg_cache_params::dead flag, so that memcg_release_pages will schedule cache destruction (memcg_cache_params::destroy) as soon as the last slab of the cache is freed (memcg_cache_params::nr_pages drops to zero). I guess the idea was to destroy the caches as soon as possible, i.e. immediately after freeing the last object. However, it just doesn't work that way, because kmem caches always preserve some pages for the sake of performance, so that nr_pages never gets to zero unless the cache is shrunk explicitly using kmem_cache_shrink. Of course, we could account the total number of objects on the cache or check if all the slabs allocated for the cache are empty on kmem_cache_free and schedule destruction if so, but that would be too costly. Thus we have a piece of code that works only when we explicitly call kmem_cache_shrink, but complicates the whole picture a lot. Moreover, it's racy in fact. For instance, kmem_cache_shrink may free the last slab and thus schedule cache destruction before it finishes checking that the cache is empty, which can lead to use-after-free. So I propose to remove this async cache destruction from memcg_release_pages, and check if the cache is empty explicitly after calling kmem_cache_shrink instead. 
This will simplify things a lot w/o introducing any functional changes. And regarding dead memcg caches (i.e. those that are left hanging around after memcg offline for they have objects), I suppose we should reap them either periodically or on vmpressure as Glauber suggested initially. I'm going to implement this later. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Glauber Costa Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 - include/linux/slab.h | 2 -- mm/memcontrol.c | 63 ++-------------------------------------------- mm/slab.h | 7 ++---- 4 files changed, 4 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5155d09e749d..087a45314181 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -509,7 +509,6 @@ __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size); void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size); -void mem_cgroup_destroy_cache(struct kmem_cache *cachep); int __kmem_cache_destroy_memcg_children(struct kmem_cache *s); /** diff --git a/include/linux/slab.h b/include/linux/slab.h index a6aab2c0dfc5..905541dd3778 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -524,7 +524,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * @memcg: pointer to the memcg this cache belongs to * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from - * @dead: set to true after the memcg dies; the cache may still be around. * @nr_pages: number of pages that belongs to this cache. * @destroy: worker to be called whenever we are ready, or believe we may be * ready, to destroy this cache. @@ -540,7 +539,6 @@ struct memcg_cache_params { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; - bool dead; atomic_t nr_pages; struct work_struct destroy; }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9f4ff49c6add..6b1c45ced733 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3277,60 +3277,11 @@ static void kmem_cache_destroy_work_func(struct work_struct *w) cachep = memcg_params_to_cache(p); - /* - * If we get down to 0 after shrink, we could delete right away. - * However, memcg_release_pages() already puts us back in the workqueue - * in that case. If we proceed deleting, we'll get a dangling - * reference, and removing the object from the workqueue in that case - * is unnecessary complication. We are not a fast path. - * - * Note that this case is fundamentally different from racing with - * shrink_slab(): if memcg_cgroup_destroy_cache() is called in - * kmem_cache_shrink, not only we would be reinserting a dead cache - * into the queue, but doing so from inside the worker racing to - * destroy it. - * - * So if we aren't down to zero, we'll just schedule a worker and try - * again - */ - if (atomic_read(&cachep->memcg_params->nr_pages) != 0) - kmem_cache_shrink(cachep); - else + kmem_cache_shrink(cachep); + if (atomic_read(&cachep->memcg_params->nr_pages) == 0) kmem_cache_destroy(cachep); } -void mem_cgroup_destroy_cache(struct kmem_cache *cachep) -{ - if (!cachep->memcg_params->dead) - return; - - /* - * There are many ways in which we can get here. - * - * We can get to a memory-pressure situation while the delayed work is - * still pending to run. 
The vmscan shrinkers can then release all - * cache memory and get us to destruction. If this is the case, we'll - * be executed twice, which is a bug (the second time will execute over - * bogus data). In this case, cancelling the work should be fine. - * - * But we can also get here from the worker itself, if - * kmem_cache_shrink is enough to shake all the remaining objects and - * get the page count to 0. In this case, we'll deadlock if we try to - * cancel the work (the worker runs with an internal lock held, which - * is the same lock we would hold for cancel_work_sync().) - * - * Since we can't possibly know who got us here, just refrain from - * running if there is already work pending - */ - if (work_pending(&cachep->memcg_params->destroy)) - return; - /* - * We have to defer the actual destroying to a workqueue, because - * we might currently be in a context that cannot sleep. - */ - schedule_work(&cachep->memcg_params->destroy); -} - int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) { struct kmem_cache *c; @@ -3356,16 +3307,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) * We will now manually delete the caches, so to avoid races * we need to cancel all pending destruction workers and * proceed with destruction ourselves. - * - * kmem_cache_destroy() will call kmem_cache_shrink internally, - * and that could spawn the workers again: it is likely that - * the cache still have active pages until this very moment. - * This would lead us back to mem_cgroup_destroy_cache. - * - * But that will not execute at all if the "dead" flag is not - * set, so flip it down to guarantee we are in control. */ - c->memcg_params->dead = false; cancel_work_sync(&c->memcg_params->destroy); kmem_cache_destroy(c); @@ -3387,7 +3329,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) mutex_lock(&memcg->slab_caches_mutex); list_for_each_entry(params, &memcg->memcg_slab_caches, list) { cachep = memcg_params_to_cache(params); - cachep->memcg_params->dead = true; schedule_work(&cachep->memcg_params->destroy); } mutex_unlock(&memcg->slab_caches_mutex); diff --git a/mm/slab.h b/mm/slab.h index d85d59803d5f..b59447ac4533 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -129,11 +129,8 @@ static inline void memcg_bind_pages(struct kmem_cache *s, int order) static inline void memcg_release_pages(struct kmem_cache *s, int order) { - if (is_root_cache(s)) - return; - - if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages)) - mem_cgroup_destroy_cache(s); + if (!is_root_cache(s)) + atomic_sub(1 << order, &s->memcg_params->nr_pages); } static inline bool slab_equal_or_root(struct kmem_cache *s, -- cgit v1.2.3 From c67a8a685a6e9abbaf0235e084168f15a721ae39 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:07:39 -0700 Subject: memcg, slab: merge memcg_{bind,release}_pages to memcg_{un}charge_slab Currently we have two pairs of kmemcg-related functions that are called on slab alloc/free. The first is memcg_{bind,release}_pages that count the total number of pages allocated on a kmem cache. The second is memcg_{un}charge_slab that {un}charge slab pages to kmemcg resource counter. Let's just merge them to keep the code clean. 
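Schematically, the result is one paired call on each side of a slab page's lifetime (a sketch of the call pattern only, not a hunk from this patch; the real callers are the slab page allocation and freeing paths):

	/* slab page allocation path of a non-root (per-memcg) cache */
	if (memcg_charge_slab(s, gfp, order))	/* kmem charge + nr_pages += 1 << order */
		return NULL;			/* charge failed, nothing to unwind */

	/* slab page freeing path */
	memcg_uncharge_slab(s, order);		/* kmem uncharge + nr_pages -= 1 << order */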
Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Glauber Costa Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 ++-- mm/memcontrol.c | 22 ++++++++++++++++++++-- mm/slab.c | 2 -- mm/slab.h | 25 ++----------------------- mm/slub.c | 2 -- 5 files changed, 24 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 087a45314181..d38d190f4cec 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -506,8 +506,8 @@ void memcg_update_array_size(int num_groups); struct kmem_cache * __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); -int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size); -void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size); +int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); +void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); int __kmem_cache_destroy_memcg_children(struct kmem_cache *s); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6b1c45ced733..86a2078805e5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2954,7 +2954,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) } #endif -int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) +static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) { struct res_counter *fail_res; int ret = 0; @@ -2992,7 +2992,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) return ret; } -void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) +static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) { res_counter_uncharge(&memcg->res, size); if (do_swap_account) @@ -3390,6 +3390,24 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, __memcg_create_cache_enqueue(memcg, cachep); memcg_resume_kmem_account(); } + +int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order) +{ + int res; + + res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, + PAGE_SIZE << order); + if (!res) + atomic_add(1 << order, &cachep->memcg_params->nr_pages); + return res; +} + +void __memcg_uncharge_slab(struct kmem_cache *cachep, int order) +{ + memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order); + atomic_sub(1 << order, &cachep->memcg_params->nr_pages); +} + /* * Return the kmem_cache we're supposed to use for a slab allocation. * We try to use the current memcg's version of the cache. 
diff --git a/mm/slab.c b/mm/slab.c index 7067ea7f3927..9ca3b87edabc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1712,7 +1712,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, __SetPageSlab(page); if (page->pfmemalloc) SetPageSlabPfmemalloc(page); - memcg_bind_pages(cachep, cachep->gfporder); if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); @@ -1748,7 +1747,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page) page_mapcount_reset(page); page->mapping = NULL; - memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; __free_pages(page, cachep->gfporder); diff --git a/mm/slab.h b/mm/slab.h index b59447ac4533..961a3fb1f5a2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -121,18 +121,6 @@ static inline bool is_root_cache(struct kmem_cache *s) return !s->memcg_params || s->memcg_params->is_root_cache; } -static inline void memcg_bind_pages(struct kmem_cache *s, int order) -{ - if (!is_root_cache(s)) - atomic_add(1 << order, &s->memcg_params->nr_pages); -} - -static inline void memcg_release_pages(struct kmem_cache *s, int order) -{ - if (!is_root_cache(s)) - atomic_sub(1 << order, &s->memcg_params->nr_pages); -} - static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { @@ -198,8 +186,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s, return 0; if (is_root_cache(s)) return 0; - return memcg_charge_kmem(s->memcg_params->memcg, gfp, - PAGE_SIZE << order); + return __memcg_charge_slab(s, gfp, order); } static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) @@ -208,7 +195,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) return; if (is_root_cache(s)) return; - memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order); + __memcg_uncharge_slab(s, order); } #else static inline bool is_root_cache(struct kmem_cache *s) @@ -216,14 +203,6 @@ static inline bool is_root_cache(struct kmem_cache *s) return true; } -static inline void memcg_bind_pages(struct kmem_cache *s, int order) -{ -} - -static inline void memcg_release_pages(struct kmem_cache *s, int order) -{ -} - static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { diff --git a/mm/slub.c b/mm/slub.c index 5d1b653183ab..9e288d7c5e6a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1422,7 +1422,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) order = compound_order(page); inc_slabs_node(s, page_to_nid(page), page->objects); - memcg_bind_pages(s, order); page->slab_cache = s; __SetPageSlab(page); if (page->pfmemalloc) @@ -1473,7 +1472,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __ClearPageSlabPfmemalloc(page); __ClearPageSlab(page); - memcg_release_pages(s, order); page_mapcount_reset(page); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; -- cgit v1.2.3 From bd67314586a3d5725e60f2f6587b4cb0f659bb67 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:07:40 -0700 Subject: memcg, slab: simplify synchronization scheme At present, we have the following mutexes protecting data related to per memcg kmem caches: - slab_mutex. This one is held during the whole kmem cache creation and destruction paths. We also take it when updating per root cache memcg_caches arrays (see memcg_update_all_caches). 
As a result, taking it guarantees there will be no changes to any kmem cache (including per memcg). Why do we need something else then? The point is it is private to the slab implementation and has some internal dependencies with other mutexes (get_online_cpus). So we just don't want to rely upon it and prefer to introduce additional mutexes instead.

 - activate_kmem_mutex. Initially it was added to synchronize initializing kmem limit (memcg_activate_kmem). However, since we can grow per root cache memcg_caches arrays only on kmem limit initialization (see memcg_update_all_caches), we also employ it to protect against memcg_caches arrays relocation (e.g. see __kmem_cache_destroy_memcg_children).

 - We have a convention not to take slab_mutex in memcontrol.c, but we want to walk over per memcg memcg_slab_caches lists there (e.g. for destroying all memcg caches on offline). So we have per memcg slab_caches_mutex's protecting those lists.

The mutexes are taken in the following order:

   activate_kmem_mutex -> slab_mutex -> memcg::slab_caches_mutex

Such a synchronization scheme has a number of flaws, for instance:

 - We can't call kmem_cache_{destroy,shrink} while walking over a memcg::memcg_slab_caches list due to locking order. As a result, in mem_cgroup_destroy_all_caches we schedule the memcg_cache_params::destroy work shrinking and destroying the cache.

 - We don't have a mutex to synchronize per memcg caches destruction between memcg offline (mem_cgroup_destroy_all_caches) and root cache destruction (__kmem_cache_destroy_memcg_children). Currently we just don't bother about it.

This patch simplifies it by substituting per memcg slab_caches_mutex's with the global memcg_slab_mutex. It will be held whenever a new per memcg cache is created or destroyed, so it protects per root cache memcg_caches arrays and per memcg memcg_slab_caches lists. The locking order is now the following:

   activate_kmem_mutex -> memcg_slab_mutex -> slab_mutex

This allows us to call kmem_cache_{create,shrink,destroy} under the memcg_slab_mutex. As a result, we don't need the memcg_cache_params::destroy work any more - we can simply destroy caches while iterating over a per memcg slab caches list. Also, using the global mutex simplifies synchronization between concurrent per memcg cache creation/destruction, e.g. mem_cgroup_destroy_all_caches vs __kmem_cache_destroy_memcg_children.

The downside of this is that we substitute per-memcg slab_caches_mutex's with a hammer-like global mutex, but since we already take either the slab_mutex or the cgroup_mutex along with a memcg::slab_caches_mutex, it shouldn't hurt concurrency a lot. 
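Schematically, memcg offline can now shrink and destroy its caches directly while walking the list (this paraphrases the mem_cgroup_destroy_all_caches() hunk below; it adds nothing beyond the patch):

	mutex_lock(&memcg_slab_mutex);
	list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
		cachep = memcg_params_to_cache(params);
		kmem_cache_shrink(cachep);
		if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
			memcg_kmem_destroy_cache(cachep); /* ends in kmem_cache_destroy(), which takes slab_mutex */
	}
	mutex_unlock(&memcg_slab_mutex);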
Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Glauber Costa Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 10 --- include/linux/slab.h | 6 +- mm/memcontrol.c | 150 ++++++++++++++++++--------------------------- mm/slab_common.c | 23 +++---- 4 files changed, 69 insertions(+), 120 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d38d190f4cec..1fa23244fe37 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -497,8 +497,6 @@ char *memcg_create_cache_name(struct mem_cgroup *memcg, int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, struct kmem_cache *root_cache); void memcg_free_cache_params(struct kmem_cache *s); -void memcg_register_cache(struct kmem_cache *s); -void memcg_unregister_cache(struct kmem_cache *s); int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); @@ -640,14 +638,6 @@ static inline void memcg_free_cache_params(struct kmem_cache *s) { } -static inline void memcg_register_cache(struct kmem_cache *s) -{ -} - -static inline void memcg_unregister_cache(struct kmem_cache *s) -{ -} - static inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 905541dd3778..ecbec9ccb80d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -116,7 +116,8 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *)); #ifdef CONFIG_MEMCG_KMEM -void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *); +struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *, + struct kmem_cache *); #endif void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); @@ -525,8 +526,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from * @nr_pages: number of pages that belongs to this cache. - * @destroy: worker to be called whenever we are ready, or believe we may be - * ready, to destroy this cache. */ struct memcg_cache_params { bool is_root_cache; @@ -540,7 +539,6 @@ struct memcg_cache_params { struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; - struct work_struct destroy; }; }; }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 86a2078805e5..6b448881422b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -357,10 +357,9 @@ struct mem_cgroup { struct cg_proto tcp_mem; #endif #if defined(CONFIG_MEMCG_KMEM) - /* analogous to slab_common's slab_caches list. per-memcg */ + /* analogous to slab_common's slab_caches list, but per-memcg; + * protected by memcg_slab_mutex */ struct list_head memcg_slab_caches; - /* Not a spinlock, we can take a lot of time walking the list */ - struct mutex slab_caches_mutex; /* Index in the kmem_cache->memcg_params->memcg_caches array */ int kmemcg_id; #endif @@ -2913,6 +2912,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, static DEFINE_MUTEX(set_limit_mutex); #ifdef CONFIG_MEMCG_KMEM +/* + * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or + * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists. 
+ */ +static DEFINE_MUTEX(memcg_slab_mutex); + static DEFINE_MUTEX(activate_kmem_mutex); static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) @@ -2945,10 +2950,10 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) print_slabinfo_header(m); - mutex_lock(&memcg->slab_caches_mutex); + mutex_lock(&memcg_slab_mutex); list_for_each_entry(params, &memcg->memcg_slab_caches, list) cache_show(memcg_params_to_cache(params), m); - mutex_unlock(&memcg->slab_caches_mutex); + mutex_unlock(&memcg_slab_mutex); return 0; } @@ -3050,8 +3055,6 @@ void memcg_update_array_size(int num) memcg_limited_groups_array_size = memcg_caches_array_size(num); } -static void kmem_cache_destroy_work_func(struct work_struct *w); - int memcg_update_cache_size(struct kmem_cache *s, int num_groups) { struct memcg_cache_params *cur_params = s->memcg_params; @@ -3148,8 +3151,6 @@ int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, if (memcg) { s->memcg_params->memcg = memcg; s->memcg_params->root_cache = root_cache; - INIT_WORK(&s->memcg_params->destroy, - kmem_cache_destroy_work_func); css_get(&memcg->css); } else s->memcg_params->is_root_cache = true; @@ -3166,24 +3167,34 @@ void memcg_free_cache_params(struct kmem_cache *s) kfree(s->memcg_params); } -void memcg_register_cache(struct kmem_cache *s) +static void memcg_kmem_create_cache(struct mem_cgroup *memcg, + struct kmem_cache *root_cache) { - struct kmem_cache *root; - struct mem_cgroup *memcg; + struct kmem_cache *cachep; int id; - if (is_root_cache(s)) + lockdep_assert_held(&memcg_slab_mutex); + + id = memcg_cache_id(memcg); + + /* + * Since per-memcg caches are created asynchronously on first + * allocation (see memcg_kmem_get_cache()), several threads can try to + * create the same cache, but only one of them may succeed. + */ + if (cache_from_memcg_idx(root_cache, id)) return; + cachep = kmem_cache_create_memcg(memcg, root_cache); /* - * Holding the slab_mutex assures nobody will touch the memcg_caches - * array while we are modifying it. + * If we could not create a memcg cache, do not complain, because + * that's not critical at all as we can always proceed with the root + * cache. */ - lockdep_assert_held(&slab_mutex); + if (!cachep) + return; - root = s->memcg_params->root_cache; - memcg = s->memcg_params->memcg; - id = memcg_cache_id(memcg); + list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); /* * Since readers won't lock (see cache_from_memcg_idx()), we need a @@ -3192,49 +3203,30 @@ void memcg_register_cache(struct kmem_cache *s) */ smp_wmb(); - /* - * Initialize the pointer to this cache in its parent's memcg_params - * before adding it to the memcg_slab_caches list, otherwise we can - * fail to convert memcg_params_to_cache() while traversing the list. 
- */ - VM_BUG_ON(root->memcg_params->memcg_caches[id]); - root->memcg_params->memcg_caches[id] = s; - - mutex_lock(&memcg->slab_caches_mutex); - list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); - mutex_unlock(&memcg->slab_caches_mutex); + BUG_ON(root_cache->memcg_params->memcg_caches[id]); + root_cache->memcg_params->memcg_caches[id] = cachep; } -void memcg_unregister_cache(struct kmem_cache *s) +static void memcg_kmem_destroy_cache(struct kmem_cache *cachep) { - struct kmem_cache *root; + struct kmem_cache *root_cache; struct mem_cgroup *memcg; int id; - if (is_root_cache(s)) - return; + lockdep_assert_held(&memcg_slab_mutex); - /* - * Holding the slab_mutex assures nobody will touch the memcg_caches - * array while we are modifying it. - */ - lockdep_assert_held(&slab_mutex); + BUG_ON(is_root_cache(cachep)); - root = s->memcg_params->root_cache; - memcg = s->memcg_params->memcg; + root_cache = cachep->memcg_params->root_cache; + memcg = cachep->memcg_params->memcg; id = memcg_cache_id(memcg); - mutex_lock(&memcg->slab_caches_mutex); - list_del(&s->memcg_params->list); - mutex_unlock(&memcg->slab_caches_mutex); + BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep); + root_cache->memcg_params->memcg_caches[id] = NULL; - /* - * Clear the pointer to this cache in its parent's memcg_params only - * after removing it from the memcg_slab_caches list, otherwise we can - * fail to convert memcg_params_to_cache() while traversing the list. - */ - VM_BUG_ON(root->memcg_params->memcg_caches[id] != s); - root->memcg_params->memcg_caches[id] = NULL; + list_del(&cachep->memcg_params->list); + + kmem_cache_destroy(cachep); } /* @@ -3268,70 +3260,42 @@ static inline void memcg_resume_kmem_account(void) current->memcg_kmem_skip_account--; } -static void kmem_cache_destroy_work_func(struct work_struct *w) -{ - struct kmem_cache *cachep; - struct memcg_cache_params *p; - - p = container_of(w, struct memcg_cache_params, destroy); - - cachep = memcg_params_to_cache(p); - - kmem_cache_shrink(cachep); - if (atomic_read(&cachep->memcg_params->nr_pages) == 0) - kmem_cache_destroy(cachep); -} - int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) { struct kmem_cache *c; int i, failed = 0; - /* - * If the cache is being destroyed, we trust that there is no one else - * requesting objects from it. Even if there are, the sanity checks in - * kmem_cache_destroy should caught this ill-case. - * - * Still, we don't want anyone else freeing memcg_caches under our - * noses, which can happen if a new memcg comes to life. As usual, - * we'll take the activate_kmem_mutex to protect ourselves against - * this. - */ - mutex_lock(&activate_kmem_mutex); + mutex_lock(&memcg_slab_mutex); for_each_memcg_cache_index(i) { c = cache_from_memcg_idx(s, i); if (!c) continue; - /* - * We will now manually delete the caches, so to avoid races - * we need to cancel all pending destruction workers and - * proceed with destruction ourselves. 
- */ - cancel_work_sync(&c->memcg_params->destroy); - kmem_cache_destroy(c); + memcg_kmem_destroy_cache(c); if (cache_from_memcg_idx(s, i)) failed++; } - mutex_unlock(&activate_kmem_mutex); + mutex_unlock(&memcg_slab_mutex); return failed; } static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) { struct kmem_cache *cachep; - struct memcg_cache_params *params; + struct memcg_cache_params *params, *tmp; if (!memcg_kmem_is_active(memcg)) return; - mutex_lock(&memcg->slab_caches_mutex); - list_for_each_entry(params, &memcg->memcg_slab_caches, list) { + mutex_lock(&memcg_slab_mutex); + list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) { cachep = memcg_params_to_cache(params); - schedule_work(&cachep->memcg_params->destroy); + kmem_cache_shrink(cachep); + if (atomic_read(&cachep->memcg_params->nr_pages) == 0) + memcg_kmem_destroy_cache(cachep); } - mutex_unlock(&memcg->slab_caches_mutex); + mutex_unlock(&memcg_slab_mutex); } struct create_work { @@ -3346,7 +3310,10 @@ static void memcg_create_cache_work_func(struct work_struct *w) struct mem_cgroup *memcg = cw->memcg; struct kmem_cache *cachep = cw->cachep; - kmem_cache_create_memcg(memcg, cachep); + mutex_lock(&memcg_slab_mutex); + memcg_kmem_create_cache(memcg, cachep); + mutex_unlock(&memcg_slab_mutex); + css_put(&memcg->css); kfree(cw); } @@ -5022,13 +4989,14 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg, * Make sure we have enough space for this cgroup in each root cache's * memcg_params. */ + mutex_lock(&memcg_slab_mutex); err = memcg_update_all_caches(memcg_id + 1); + mutex_unlock(&memcg_slab_mutex); if (err) goto out_rmid; memcg->kmemcg_id = memcg_id; INIT_LIST_HEAD(&memcg->memcg_slab_caches); - mutex_init(&memcg->slab_caches_mutex); /* * We couldn't have accounted to this cgroup, because it hasn't got the diff --git a/mm/slab_common.c b/mm/slab_common.c index 2dd920dc3776..7e348cff814d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -160,7 +160,6 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align, s->refcount = 1; list_add(&s->list, &slab_caches); - memcg_register_cache(s); out: if (err) return ERR_PTR(err); @@ -270,9 +269,10 @@ EXPORT_SYMBOL(kmem_cache_create); * requests going from @memcg to @root_cache. The new cache inherits properties * from its parent. */ -void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache) +struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *memcg, + struct kmem_cache *root_cache) { - struct kmem_cache *s; + struct kmem_cache *s = NULL; char *cache_name; get_online_cpus(); @@ -280,14 +280,6 @@ void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_c mutex_lock(&slab_mutex); - /* - * Since per-memcg caches are created asynchronously on first - * allocation (see memcg_kmem_get_cache()), several threads can try to - * create the same cache, but only one of them may succeed. 
- */ - if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg))) - goto out_unlock; - cache_name = memcg_create_cache_name(memcg, root_cache); if (!cache_name) goto out_unlock; @@ -296,14 +288,18 @@ void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_c root_cache->size, root_cache->align, root_cache->flags, root_cache->ctor, memcg, root_cache); - if (IS_ERR(s)) + if (IS_ERR(s)) { kfree(cache_name); + s = NULL; + } out_unlock: mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); + + return s; } static int kmem_cache_destroy_memcg_children(struct kmem_cache *s) @@ -348,11 +344,8 @@ void kmem_cache_destroy(struct kmem_cache *s) goto out_unlock; list_del(&s->list); - memcg_unregister_cache(s); - if (__kmem_cache_shutdown(s) != 0) { list_add(&s->list, &slab_caches); - memcg_register_cache(s); printk(KERN_ERR "kmem_cache_destroy %s: " "Slab cache still has objects\n", s->name); dump_stack(); -- cgit v1.2.3 From 1b938c0827478df268d2336469ec48d400a2eb3e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 4 Jun 2014 16:07:43 -0700 Subject: fs/buffer.c: remove block_write_full_page_endio() The last in-tree caller of block_write_full_page_endio() was removed in January 2013. It's time to remove the EXPORT_SYMBOL, which leaves block_write_full_page() as the only caller of block_write_full_page_endio(), so inline block_write_full_page_endio() into block_write_full_page(). Signed-off-by: Matthew Wilcox Cc: Hugh Dickins Cc: Dave Chinner Cc: Dheeraj Reddy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 21 +++++---------------- fs/ext4/page-io.c | 2 +- fs/ocfs2/file.c | 2 +- include/linux/buffer_head.h | 2 -- 4 files changed, 7 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/buffer.c b/fs/buffer.c index 6a8110c03a47..e33f8d5452ad 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2879,10 +2879,9 @@ EXPORT_SYMBOL(block_truncate_page); /* * The generic ->writepage function for buffer-backed address_spaces - * this form passes in the end_io handler used to finish the IO. */ -int block_write_full_page_endio(struct page *page, get_block_t *get_block, - struct writeback_control *wbc, bh_end_io_t *handler) +int block_write_full_page(struct page *page, get_block_t *get_block, + struct writeback_control *wbc) { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); @@ -2892,7 +2891,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, /* Is the page fully inside i_size? */ if (page->index < end_index) return __block_write_full_page(inode, page, get_block, wbc, - handler); + end_buffer_async_write); /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); @@ -2915,18 +2914,8 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, * writes to that region are not written out to the file." 
*/ zero_user_segment(page, offset, PAGE_CACHE_SIZE); - return __block_write_full_page(inode, page, get_block, wbc, handler); -} -EXPORT_SYMBOL(block_write_full_page_endio); - -/* - * The generic ->writepage function for buffer-backed address_spaces - */ -int block_write_full_page(struct page *page, get_block_t *get_block, - struct writeback_control *wbc) -{ - return block_write_full_page_endio(page, get_block, wbc, - end_buffer_async_write); + return __block_write_full_page(inode, page, get_block, wbc, + end_buffer_async_write); } EXPORT_SYMBOL(block_write_full_page); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index c18d95b50540..1a64e7a52b84 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -429,7 +429,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, block_start = bh_offset(bh); if (block_start >= len) { /* - * Comments copied from block_write_full_page_endio: + * Comments copied from block_write_full_page: * * The page straddles i_size. It must be zeroed out on * each and every writepage invocation because it may diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8970dcf74de5..8eb6e5732d3b 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -828,7 +828,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, /* * fs-writeback will release the dirty pages without page lock * whose offset are over inode size, the release happens at - * block_write_full_page_endio(). + * block_write_full_page(). */ i_size_write(inode, abs_to); inode->i_blocks = ocfs2_inode_sector_count(inode); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 7cbf837a279c..324329ceea1e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -207,8 +207,6 @@ void block_invalidatepage(struct page *page, unsigned int offset, unsigned int length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); -int block_write_full_page_endio(struct page *page, get_block_t *get_block, - struct writeback_control *wbc, bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); -- cgit v1.2.3 From 57d998456ae8680ed446aa1993f45f4d8a9a5973 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 4 Jun 2014 16:07:45 -0700 Subject: fs/mpage.c: factor page_endio() out of mpage_end_io() page_endio() takes care of updating all the appropriate page flags once I/O has finished to a page. Switch to using mapping_set_error() instead of setting AS_EIO directly; this will handle thin-provisioned devices correctly. 
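In rough outline, a bio completion handler that used to open-code the page-flag updates can now be as small as this (an illustrative sketch; the mpage_end_io() conversion below is the actual in-tree user):

	static void my_end_io(struct bio *bio, int err)	/* "my_end_io" is a made-up name */
	{
		struct bio_vec *bv;
		int i;

		bio_for_each_segment_all(bv, bio, i)
			page_endio(bv->bv_page, bio_data_dir(bio), err);

		bio_put(bio);
	}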
Signed-off-by: Matthew Wilcox Cc: Dave Chinner Cc: Dheeraj Reddy Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/mpage.c | 18 +----------------- include/linux/pagemap.h | 2 ++ mm/filemap.c | 25 +++++++++++++++++++++++++ 3 files changed, 28 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/mpage.c b/fs/mpage.c index 4cc9c5d079f7..10da0da73017 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -48,23 +48,7 @@ static void mpage_end_io(struct bio *bio, int err) bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; - - if (bio_data_dir(bio) == READ) { - if (!err) { - SetPageUptodate(page); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - unlock_page(page); - } else { /* bio_data_dir(bio) == WRITE */ - if (err) { - SetPageError(page); - if (page->mapping) - set_bit(AS_EIO, &page->mapping->flags); - } - end_page_writeback(page); - } + page_endio(page, bio_data_dir(bio), err); } bio_put(bio); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 45598f1e9aa3..718214c5584e 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -425,6 +425,8 @@ static inline void wait_on_page_writeback(struct page *page) extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); +void page_endio(struct page *page, int rw, int err); + /* * Add an arbitrary waiter to a page's wait queue */ diff --git a/mm/filemap.c b/mm/filemap.c index 021056c324e6..47d235b357a7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -764,6 +764,31 @@ void end_page_writeback(struct page *page) } EXPORT_SYMBOL(end_page_writeback); +/* + * After completing I/O on a page, call this routine to update the page + * flags appropriately + */ +void page_endio(struct page *page, int rw, int err) +{ + if (rw == READ) { + if (!err) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + } else { /* rw == WRITE */ + if (err) { + SetPageError(page); + if (page->mapping) + mapping_set_error(page->mapping, err); + } + end_page_writeback(page); + } +} +EXPORT_SYMBOL_GPL(page_endio); + /** * __lock_page - get a lock on the page, assuming we need to sleep to get it * @page: the page to lock -- cgit v1.2.3 From 47a191fd38ebddb1bd1510ec2bc1085c578c8868 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 4 Jun 2014 16:07:46 -0700 Subject: fs/block_dev.c: add bdev_read_page() and bdev_write_page() A block device driver may choose to provide a rw_page operation. These will be called when the filesystem is attempting to do page sized I/O to page cache pages (ie not for direct I/O). This does preclude I/Os that are larger than page size, so this may only be a performance gain for some devices. 
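On the driver side, opting in amounts to providing the new method (a hypothetical sketch; the mydrv_* names, including mydrv_do_io(), are invented and real drivers will differ):

	static int mydrv_rw_page(struct block_device *bdev, sector_t sector,
				 struct page *page, int rw)
	{
		int err = mydrv_do_io(bdev->bd_disk->private_data, page, sector, rw);

		if (!err)
			page_endio(page, rw, 0);	/* unlock (READ) or end writeback (WRITE) */
		return err;	/* non-zero is a soft error; callers fall back to a bio */
	}

	static const struct block_device_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.rw_page	= mydrv_rw_page,
	};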
Signed-off-by: Matthew Wilcox Tested-by: Dheeraj Reddy Cc: Dave Chinner Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/block_dev.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/mpage.c | 12 ++++++++++ include/linux/blkdev.h | 4 ++++ 3 files changed, 79 insertions(+) (limited to 'include/linux') diff --git a/fs/block_dev.c b/fs/block_dev.c index 552a8d13bc32..83fba15cc394 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -363,6 +363,69 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) } EXPORT_SYMBOL(blkdev_fsync); +/** + * bdev_read_page() - Start reading a page from a block device + * @bdev: The device to read the page from + * @sector: The offset on the device to read the page to (need not be aligned) + * @page: The page to read + * + * On entry, the page should be locked. It will be unlocked when the page + * has been read. If the block driver implements rw_page synchronously, + * that will be true on exit from this function, but it need not be. + * + * Errors returned by this function are usually "soft", eg out of memory, or + * queue full; callers should try a different route to read this page rather + * than propagate an error back up the stack. + * + * Return: negative errno if an error occurs, 0 if submission was successful. + */ +int bdev_read_page(struct block_device *bdev, sector_t sector, + struct page *page) +{ + const struct block_device_operations *ops = bdev->bd_disk->fops; + if (!ops->rw_page) + return -EOPNOTSUPP; + return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); +} +EXPORT_SYMBOL_GPL(bdev_read_page); + +/** + * bdev_write_page() - Start writing a page to a block device + * @bdev: The device to write the page to + * @sector: The offset on the device to write the page to (need not be aligned) + * @page: The page to write + * @wbc: The writeback_control for the write + * + * On entry, the page should be locked and not currently under writeback. + * On exit, if the write started successfully, the page will be unlocked and + * under writeback. If the write failed already (eg the driver failed to + * queue the page to the device), the page will still be locked. If the + * caller is a ->writepage implementation, it will need to unlock the page. + * + * Errors returned by this function are usually "soft", eg out of memory, or + * queue full; callers should try a different route to write this page rather + * than propagate an error back up the stack. + * + * Return: negative errno if an error occurs, 0 if submission was successful. + */ +int bdev_write_page(struct block_device *bdev, sector_t sector, + struct page *page, struct writeback_control *wbc) +{ + int result; + int rw = (wbc->sync_mode == WB_SYNC_ALL) ? 
WRITE_SYNC : WRITE; + const struct block_device_operations *ops = bdev->bd_disk->fops; + if (!ops->rw_page) + return -EOPNOTSUPP; + set_page_writeback(page); + result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); + if (result) + end_page_writeback(page); + else + unlock_page(page); + return result; +} +EXPORT_SYMBOL_GPL(bdev_write_page); + /* * pseudo-fs */ diff --git a/fs/mpage.c b/fs/mpage.c index 10da0da73017..5f9ed622274f 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -269,6 +269,11 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, alloc_new: if (bio == NULL) { + if (first_hole == blocks_per_page) { + if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9), + page)) + goto out; + } bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), min_t(int, nr_pages, bio_get_nr_vecs(bdev)), GFP_KERNEL); @@ -587,6 +592,13 @@ page_is_mapped: alloc_new: if (bio == NULL) { + if (first_unmapped == blocks_per_page) { + if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), + page, wbc)) { + clean_buffers(page, first_unmapped); + goto out; + } + } bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH); if (bio == NULL) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 45cf6e537c83..2f3886e6cc78 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1588,6 +1588,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) struct block_device_operations { int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*direct_access) (struct block_device *, sector_t, @@ -1606,6 +1607,9 @@ struct block_device_operations { extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long); +extern int bdev_read_page(struct block_device *, sector_t, struct page *); +extern int bdev_write_page(struct block_device *, sector_t, struct page *, + struct writeback_control *); #else /* CONFIG_BLOCK */ struct block_device; -- cgit v1.2.3 From f7f28ca98b9a7a99fc55df2dddcf49857ab004f0 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Wed, 4 Jun 2014 16:07:57 -0700 Subject: mm: constify nmask argument to mbind() The nmask argument to mbind() is const according to the userspace header numaif.h, and since the kernel does indeed not modify it, it might as well be declared const in the kernel. 
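For reference, the corresponding userspace declaration (approximately as found in libnuma's numaif.h; quoted from memory, not taken from this patch) already reads:

	long mbind(void *start, unsigned long len, int mode,
		   const unsigned long *nmask, unsigned long maxnode,
		   unsigned flags);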
Signed-off-by: Rasmus Villemoes Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/syscalls.h | 2 +- mm/mempolicy.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a4a0588c5397..bfef0be279dd 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -723,7 +723,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, int flags); asmlinkage long sys_mbind(unsigned long start, unsigned long len, unsigned long mode, - unsigned long __user *nmask, + const unsigned long __user *nmask, unsigned long maxnode, unsigned flags); asmlinkage long sys_get_mempolicy(int __user *policy, diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 78e1472933ea..727187f1155b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1362,7 +1362,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, - unsigned long, mode, unsigned long __user *, nmask, + unsigned long, mode, const unsigned long __user *, nmask, unsigned long, maxnode, unsigned, flags) { nodemask_t nodes; -- cgit v1.2.3 From 23c8902d403ef9a04cdc367d0b76a3ed6d83f5c5 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Wed, 4 Jun 2014 16:07:58 -0700 Subject: mm: constify nmask argument to set_mempolicy() The nmask argument to set_mempolicy() is const according to the user-space header numaif.h, and since the kernel does indeed not modify it, it might as well be declared const in the kernel. Signed-off-by: Rasmus Villemoes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/syscalls.h | 2 +- mm/mempolicy.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bfef0be279dd..b0881a0ed322 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -711,7 +711,7 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, asmlinkage long sys_ioprio_set(int which, int who, int ioprio); asmlinkage long sys_ioprio_get(int which, int who); -asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, +asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask, unsigned long maxnode); asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *from, diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 727187f1155b..b09586d8316b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1383,7 +1383,7 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, } /* Set the process memory policy */ -SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, +SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, unsigned long, maxnode) { int err; -- cgit v1.2.3 From d2ee40eae98d8a41ff27dcdd13b1b656c4c1ad00 Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Wed, 4 Jun 2014 16:08:02 -0700 Subject: mm: introduce compound_head_by_tail() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, in put_compound_page(), we have ====== if (likely(!PageTail(page))) { <------ (1) if (put_page_testzero(page)) { /* * By the time all refcounts have been released * split_huge_page cannot run anymore from under us. 
*/ if (PageHead(page)) __put_compound_page(page); else __put_single_page(page); } return; } /* __split_huge_page_refcount can run under us */ page_head = compound_head(page); <------------ (2) ====== if at (1) we fail the check, this means page is *likely* a tail page. Then at (2), as compound_head(page) is inlined, it is: ====== static inline struct page *compound_head(struct page *page) { if (unlikely(PageTail(page))) { <----------- (3) struct page *head = page->first_page; smp_rmb(); if (likely(PageTail(page))) return head; } return page; } ====== Here, the unlikely at (3) is a negative hint for this case, because the page is *likely* a tail page; the check at (3) is therefore a poor fit, so I introduce a helper for this case. So this patch introduces compound_head_by_tail(), which deals with a possible tail page (though it could be split by a racy thread), and makes compound_head() a wrapper around it. This patch has no functional change, and it reduces the object size slightly: text data bss dec hex filename 11003 1328 16 12347 303b mm/swap.o.orig 10971 1328 16 12315 301b mm/swap.o.patched I've run "perf top -e branch-miss" to observe branch misses in this case. As Michael points out, it's a slow path, so this case happens only rarely. But I grep'ed the code base and found there are still some other call sites that could benefit from this helper. And given that it grows the source by only 5 lines while reducing the object size, I still believe this helper deserves to exist. Signed-off-by: Jianyu Zhan Cc: Kirill A. Shutemov Cc: Rik van Riel Cc: Jiang Liu Cc: Peter Zijlstra Cc: Johannes Weiner Cc: Mel Gorman Cc: Andrea Arcangeli Cc: Sasha Levin Cc: Wanpeng Li Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 29 +++++++++++++++++------------ mm/swap.c | 2 +- 2 files changed, 18 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index d6777060449f..368600628d14 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -407,20 +407,25 @@ static inline void compound_unlock_irqrestore(struct page *page, #endif } +static inline struct page *compound_head_by_tail(struct page *tail) +{ + struct page *head = tail->first_page; + + /* + * page->first_page may be a dangling pointer to an old + * compound page, so recheck that it is still a tail + * page before returning. + */ + smp_rmb(); + if (likely(PageTail(tail))) + return head; + return tail; +} + static inline struct page *compound_head(struct page *page) { - if (unlikely(PageTail(page))) { - struct page *head = page->first_page; - - /* - * page->first_page may be a dangling pointer to an old - * compound page, so recheck that it is still a tail - * page before returning. - */ - smp_rmb(); - if (likely(PageTail(page))) - return head; - } + if (unlikely(PageTail(page))) + return compound_head_by_tail(page); return page; } diff --git a/mm/swap.c b/mm/swap.c index d089c5a0cf98..c8d6df556ce6 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -253,7 +253,7 @@ static void put_compound_page(struct page *page) * Case 3 is possible, as we may race with * __split_huge_page_refcount tearing down a THP page. 
*/ - page_head = compound_head(page); + page_head = compound_head_by_tail(page); if (!__compound_tail_refcounted(page_head)) put_unrefcounted_compound_page(page_head, page); else -- cgit v1.2.3 From 1754e44e8291c92b9d981a6eca59f28dd25f03ab Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Wed, 4 Jun 2014 16:08:04 -0700 Subject: include/linux/bootmem.h: cleanup the comment for BOOTMEM_ flags Use BOOTMEM_DEFAULT instead of 0 in the comment. Signed-off-by: Wang Sheng-Hui Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bootmem.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index db51fe4fe317..4e2bd4c95b66 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -58,9 +58,9 @@ extern void free_bootmem_late(unsigned long physaddr, unsigned long size); * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, * the architecture-specific code should honor this). * - * If flags is 0, then the return value is always 0 (success). If - * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the - * memory already was reserved. + * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success). + * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory + * already was reserved. */ #define BOOTMEM_DEFAULT 0 #define BOOTMEM_EXCLUSIVE (1<<0) -- cgit v1.2.3 From ac7695012a6f3269acd80d6c2b2218a6769edbf3 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 4 Jun 2014 16:08:17 -0700 Subject: mm/rmap.c: make page_referenced_one() and try_to_unmap_one() static KSM was converted to use rmap_walk() and now nobody uses these functions outside mm/rmap.c. Let's covert them back to static. Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 4 ---- mm/rmap.c | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b66c2110cb1f..9be55c7617da 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -183,14 +183,10 @@ static inline void page_dup_rmap(struct page *page) */ int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -int page_referenced_one(struct page *, struct vm_area_struct *, - unsigned long address, void *arg); #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) int try_to_unmap(struct page *, enum ttu_flags flags); -int try_to_unmap_one(struct page *, struct vm_area_struct *, - unsigned long address, void *arg); /* * Called from mm/filemap_xip.c to unmap empty zero page diff --git a/mm/rmap.c b/mm/rmap.c index 7da400d5d98e..8754e1fa83b6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -671,7 +671,7 @@ struct page_referenced_arg { /* * arg: page_referenced_arg will be passed */ -int page_referenced_one(struct page *page, struct vm_area_struct *vma, +static int page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; @@ -1114,7 +1114,7 @@ out: /* * @arg: enum ttu_flags will be passed to this argument */ -int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, +static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; -- cgit v1.2.3 From 3fb1c8dcfcda2f5bfb7d79d8b08bf2f04b1eed8f Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Wed, 4 Jun 2014 16:08:20 -0700 Subject: mm: update comment for DEFAULT_MAX_MAP_COUNT With ELF extended numbering 16-bit bound is not hard limit any more. [akpm@linux-foundation.org: fix typo] Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched/sysctl.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 8045a554cafb..596a0e007c62 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -25,6 +25,10 @@ enum { sysctl_hung_task_timeout_secs = 0 }; * Because the kernel adds some informative sections to a image of program at * generating coredump, we need some margin. The number of extra sections is * 1-3 now and depends on arch. We use "5" as safe margin, here. + * + * ELF extended numbering allows more than 65535 sections, so 16-bit bound is + * not a hard limit any more. Although some userspace tools can be surprised by + * that. */ #define MAPCOUNT_ELF_CORE_MARGIN (5) #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) -- cgit v1.2.3 From 073ee1c6cd11cd190f4d0da84d9b4ba79d7b9e70 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:08:23 -0700 Subject: memcg: get rid of memcg_create_cache_name Instead of calling back to memcontrol.c from kmem_cache_create_memcg in order to just create the name of a per memcg cache, let's allocate it in place. We only need to pass the memcg name to kmem_cache_create_memcg for that - everything else can be done in slab_common.c. Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 -- include/linux/slab.h | 3 ++- mm/memcontrol.c | 33 +++++++++------------------------ mm/slab_common.c | 7 +++++-- 4 files changed, 16 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1fa23244fe37..dfc2929a3877 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -492,8 +492,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -char *memcg_create_cache_name(struct mem_cgroup *memcg, - struct kmem_cache *root_cache); int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, struct kmem_cache *root_cache); void memcg_free_cache_params(struct kmem_cache *s); diff --git a/include/linux/slab.h b/include/linux/slab.h index ecbec9ccb80d..86e5b26fbdab 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -117,7 +117,8 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, void (*)(void *)); #ifdef CONFIG_MEMCG_KMEM struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *, - struct kmem_cache *); + struct kmem_cache *, + const char *); #endif void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 971d7b643f6e..7df7f599e3df 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3095,29 +3095,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return 0; } -char *memcg_create_cache_name(struct mem_cgroup *memcg, - struct kmem_cache *root_cache) -{ - static char *buf; - - /* - * We need a mutex here to protect the shared buffer. 
Since this is - * expected to be called only on cache creation, we can employ the - * slab_mutex for that purpose. - */ - lockdep_assert_held(&slab_mutex); - - if (!buf) { - buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); - if (!buf) - return NULL; - } - - cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1); - return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name, - memcg_cache_id(memcg), buf); -} - int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, struct kmem_cache *root_cache) { @@ -3158,6 +3135,7 @@ void memcg_free_cache_params(struct kmem_cache *s) static void memcg_kmem_create_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { + static char *memcg_name_buf; /* protected by memcg_slab_mutex */ struct kmem_cache *cachep; int id; @@ -3173,7 +3151,14 @@ static void memcg_kmem_create_cache(struct mem_cgroup *memcg, if (cache_from_memcg_idx(root_cache, id)) return; - cachep = kmem_cache_create_memcg(memcg, root_cache); + if (!memcg_name_buf) { + memcg_name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); + if (!memcg_name_buf) + return; + } + + cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1); + cachep = kmem_cache_create_memcg(memcg, root_cache, memcg_name_buf); /* * If we could not create a memcg cache, do not complain, because * that's not critical at all as we can always proceed with the root diff --git a/mm/slab_common.c b/mm/slab_common.c index 7e348cff814d..32175617cb75 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -264,13 +264,15 @@ EXPORT_SYMBOL(kmem_cache_create); * kmem_cache_create_memcg - Create a cache for a memory cgroup. * @memcg: The memory cgroup the new cache is for. * @root_cache: The parent of the new cache. + * @memcg_name: The name of the memory cgroup (used for naming the new cache). * * This function attempts to create a kmem cache that will serve allocation * requests going from @memcg to @root_cache. The new cache inherits properties * from its parent. */ struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *memcg, - struct kmem_cache *root_cache) + struct kmem_cache *root_cache, + const char *memcg_name) { struct kmem_cache *s = NULL; char *cache_name; @@ -280,7 +282,8 @@ struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *memcg, mutex_lock(&slab_mutex); - cache_name = memcg_create_cache_name(memcg, root_cache); + cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name, + memcg_cache_id(memcg), memcg_name); if (!cache_name) goto out_unlock; -- cgit v1.2.3 From 68711a746345c44ae00c64d8dbac6a9ce13ac54a Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 4 Jun 2014 16:08:25 -0700 Subject: mm, migration: add destination page freeing callback Memory migration uses a callback defined by the caller to determine how to allocate destination pages. When migration fails for a source page, however, it frees the destination page back to the system. This patch adds a memory migration callback defined by the caller to determine how to free destination pages. If a caller, such as memory compaction, builds its own freelist for migration targets, this can reuse already freed memory instead of scanning additional memory. If the caller provides a function to handle freeing of destination pages, it is called when page migration fails. If the caller passes NULL then freeing back to the system will be handled as usual. This patch introduces no functional change. 
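As a sketch of how a caller with its own freelist might use the new hook (hypothetical pool helpers; the real pair used by compaction is compaction_alloc()/compaction_free(), wired up in the diff below):

#include <linux/list.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/* hypothetical caller-private freelist of ready-to-use target pages */
struct pool {
	struct list_head freepages;
};

static struct page *pool_alloc_page(struct page *page, unsigned long private,
				    int **result)
{
	struct pool *pool = (struct pool *)private;
	struct page *newpage;

	if (list_empty(&pool->freepages))
		return NULL;
	newpage = list_first_entry(&pool->freepages, struct page, lru);
	list_del(&newpage->lru);
	return newpage;
}

static void pool_free_page(struct page *page, unsigned long private)
{
	struct pool *pool = (struct pool *)private;

	/* migration failed: keep the unused target page on our freelist
	 * instead of handing it back to the page allocator */
	list_add(&page->lru, &pool->freepages);
}

static int pool_migrate_list(struct list_head *pagelist, struct pool *pool)
{
	/* migrate reason is purely illustrative here */
	return migrate_pages(pagelist, pool_alloc_page, pool_free_page,
			     (unsigned long)pool, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}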
Signed-off-by: David Rientjes Reviewed-by: Naoya Horiguchi Acked-by: Mel Gorman Acked-by: Vlastimil Babka Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 11 ++++++---- mm/compaction.c | 2 +- mm/memory-failure.c | 4 ++-- mm/memory_hotplug.c | 2 +- mm/mempolicy.c | 4 ++-- mm/migrate.c | 55 +++++++++++++++++++++++++++++++++++-------------- mm/page_alloc.c | 2 +- 7 files changed, 53 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 84a31ad0b791..a2901c414664 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -5,7 +5,9 @@ #include #include -typedef struct page *new_page_t(struct page *, unsigned long private, int **); +typedef struct page *new_page_t(struct page *page, unsigned long private, + int **reason); +typedef void free_page_t(struct page *page, unsigned long private); /* * Return values from addresss_space_operations.migratepage(): @@ -38,7 +40,7 @@ enum migrate_reason { extern void putback_movable_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); -extern int migrate_pages(struct list_head *l, new_page_t x, +extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); extern int migrate_prep(void); @@ -56,8 +58,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping, #else static inline void putback_movable_pages(struct list_head *l) {} -static inline int migrate_pages(struct list_head *l, new_page_t x, - unsigned long private, enum migrate_mode mode, int reason) +static inline int migrate_pages(struct list_head *l, new_page_t new, + free_page_t free, unsigned long private, enum migrate_mode mode, + int reason) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; } diff --git a/mm/compaction.c b/mm/compaction.c index 6010aabde28c..f74a362d2e28 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1016,7 +1016,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } nr_migrate = cc->nr_migratepages; - err = migrate_pages(&cc->migratepages, compaction_alloc, + err = migrate_pages(&cc->migratepages, compaction_alloc, NULL, (unsigned long)cc, cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC, MR_COMPACTION); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d50f17fb9be2..3cd1b652821c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1503,7 +1503,7 @@ static int soft_offline_huge_page(struct page *page, int flags) /* Keep page count to indicate a given hugepage is isolated. 
*/ list_move(&hpage->lru, &pagelist); - ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, + ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { pr_info("soft offline: %#lx: migration failed %d, type %lx\n", @@ -1584,7 +1584,7 @@ static int __soft_offline_page(struct page *page, int flags) inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); - ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, + ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { if (!list_empty(&pagelist)) { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index cbb7ca0ac44b..469bbf505f85 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1394,7 +1394,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) * alloc_migrate_target should be improooooved!! * migrate_pages returns # of failed pages. */ - ret = migrate_pages(&source, alloc_migrate_target, 0, + ret = migrate_pages(&source, alloc_migrate_target, NULL, 0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); if (ret) putback_movable_pages(&source); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 7f7864b95e8e..16bc9fa42998 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1028,7 +1028,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { - err = migrate_pages(&pagelist, new_node_page, dest, + err = migrate_pages(&pagelist, new_node_page, NULL, dest, MIGRATE_SYNC, MR_SYSCALL); if (err) putback_movable_pages(&pagelist); @@ -1277,7 +1277,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); nr_failed = migrate_pages(&pagelist, new_vma_page, - (unsigned long)vma, + NULL, (unsigned long)vma, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); if (nr_failed) putback_movable_pages(&pagelist); diff --git a/mm/migrate.c b/mm/migrate.c index 6247be7fa30e..2a459675eeab 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -938,8 +938,9 @@ out: * Obtain the lock on page, remove all ptes and migrate the page * to the newly allocated page in newpage. */ -static int unmap_and_move(new_page_t get_new_page, unsigned long private, - struct page *page, int force, enum migrate_mode mode) +static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, + unsigned long private, struct page *page, int force, + enum migrate_mode mode) { int rc = 0; int *result = NULL; @@ -983,11 +984,17 @@ out: page_is_file_cache(page)); putback_lru_page(page); } + /* - * Move the new page to the LRU. If migration was not successful - * then this will free the page. + * If migration was not successful and there's a freeing callback, use + * it. Otherwise, putback_lru_page() will drop the reference grabbed + * during isolation. */ - putback_lru_page(newpage); + if (rc != MIGRATEPAGE_SUCCESS && put_new_page) + put_new_page(newpage, private); + else + putback_lru_page(newpage); + if (result) { if (rc) *result = rc; @@ -1016,8 +1023,9 @@ out: * will wait in the page fault for migration to complete. 
*/ static int unmap_and_move_huge_page(new_page_t get_new_page, - unsigned long private, struct page *hpage, - int force, enum migrate_mode mode) + free_page_t put_new_page, unsigned long private, + struct page *hpage, int force, + enum migrate_mode mode) { int rc = 0; int *result = NULL; @@ -1056,20 +1064,30 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, if (!page_mapped(hpage)) rc = move_to_new_page(new_hpage, hpage, 1, mode); - if (rc) + if (rc != MIGRATEPAGE_SUCCESS) remove_migration_ptes(hpage, hpage); if (anon_vma) put_anon_vma(anon_vma); - if (!rc) + if (rc == MIGRATEPAGE_SUCCESS) hugetlb_cgroup_migrate(hpage, new_hpage); unlock_page(hpage); out: if (rc != -EAGAIN) putback_active_hugepage(hpage); - put_page(new_hpage); + + /* + * If migration was not successful and there's a freeing callback, use + * it. Otherwise, put_page() will drop the reference grabbed during + * isolation. + */ + if (rc != MIGRATEPAGE_SUCCESS && put_new_page) + put_new_page(new_hpage, private); + else + put_page(new_hpage); + if (result) { if (rc) *result = rc; @@ -1086,6 +1104,8 @@ out: * @from: The list of pages to be migrated. * @get_new_page: The function used to allocate free pages to be used * as the target of the page migration. + * @put_new_page: The function used to free target pages if migration + * fails, or NULL if no special handling is necessary. * @private: Private data to be passed on to get_new_page() * @mode: The migration mode that specifies the constraints for * page migration, if any. @@ -1099,7 +1119,8 @@ out: * Returns the number of pages that were not migrated, or an error code. */ int migrate_pages(struct list_head *from, new_page_t get_new_page, - unsigned long private, enum migrate_mode mode, int reason) + free_page_t put_new_page, unsigned long private, + enum migrate_mode mode, int reason) { int retry = 1; int nr_failed = 0; @@ -1121,10 +1142,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, if (PageHuge(page)) rc = unmap_and_move_huge_page(get_new_page, - private, page, pass > 2, mode); + put_new_page, private, page, + pass > 2, mode); else - rc = unmap_and_move(get_new_page, private, - page, pass > 2, mode); + rc = unmap_and_move(get_new_page, put_new_page, + private, page, pass > 2, mode); switch(rc) { case -ENOMEM: @@ -1273,7 +1295,7 @@ set_status: err = 0; if (!list_empty(&pagelist)) { - err = migrate_pages(&pagelist, new_page_node, + err = migrate_pages(&pagelist, new_page_node, NULL, (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); if (err) putback_movable_pages(&pagelist); @@ -1729,7 +1751,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, list_add(&page->lru, &migratepages); nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, - node, MIGRATE_ASYNC, MR_NUMA_MISPLACED); + NULL, node, MIGRATE_ASYNC, + MR_NUMA_MISPLACED); if (nr_remaining) { if (!list_empty(&migratepages)) { list_del(&page->lru); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 132c337dbe55..027d0294413a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6218,7 +6218,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, cc->nr_migratepages -= nr_reclaimed; ret = migrate_pages(&cc->migratepages, alloc_migrate_target, - 0, MIGRATE_SYNC, MR_CMA); + NULL, 0, MIGRATE_SYNC, MR_CMA); } if (ret < 0) { putback_movable_pages(&cc->migratepages); -- cgit v1.2.3 From 35979ef3393110ff3c12c6b94552208d3bdf1a36 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 4 Jun 2014 16:08:27 -0700 Subject: mm, compaction: 
add per-zone migration pfn cache for async compaction Each zone has a cached migration scanner pfn for memory compaction so that subsequent calls to memory compaction can start where the previous call left off. Currently, the compaction migration scanner only updates the per-zone cached pfn when pageblocks were not skipped for async compaction. This creates a dependency on calling sync compaction to avoid having subsequent calls to async compaction from scanning an enormous amount of non-MOVABLE pageblocks each time it is called. On large machines, this could be potentially very expensive. This patch adds a per-zone cached migration scanner pfn only for async compaction. It is updated everytime a pageblock has been scanned in its entirety and when no pages from it were successfully isolated. The cached migration scanner pfn for sync compaction is updated only when called for sync compaction. Signed-off-by: David Rientjes Acked-by: Vlastimil Babka Reviewed-by: Naoya Horiguchi Cc: Greg Thelen Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 5 ++-- mm/compaction.c | 66 ++++++++++++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ae693e1ad0f9..10a96ee68311 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -360,9 +360,10 @@ struct zone { /* Set to true when the PG_migrate_skip bits should be cleared */ bool compact_blockskip_flush; - /* pfns where compaction scanners should start */ + /* pfn where compaction free scanner should start */ unsigned long compact_cached_free_pfn; - unsigned long compact_cached_migrate_pfn; + /* pfn where async and sync compaction migration scanner should start */ + unsigned long compact_cached_migrate_pfn[2]; #endif #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ diff --git a/mm/compaction.c b/mm/compaction.c index d0c7c994e11b..70c0f8cda33f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone) unsigned long end_pfn = zone_end_pfn(zone); unsigned long pfn; - zone->compact_cached_migrate_pfn = start_pfn; + zone->compact_cached_migrate_pfn[0] = start_pfn; + zone->compact_cached_migrate_pfn[1] = start_pfn; zone->compact_cached_free_pfn = end_pfn; zone->compact_blockskip_flush = false; @@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat) */ static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long nr_isolated, - bool migrate_scanner) + bool set_unsuitable, bool migrate_scanner) { struct zone *zone = cc->zone; + unsigned long pfn; if (cc->ignore_skip_hint) return; @@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc, if (!page) return; - if (!nr_isolated) { - unsigned long pfn = page_to_pfn(page); + if (nr_isolated) + return; + + /* + * Only skip pageblocks when all forms of compaction will be known to + * fail in the near future. 
+ */ + if (set_unsuitable) set_pageblock_skip(page); - /* Update where compaction should restart */ - if (migrate_scanner) { - if (!cc->finished_update_migrate && - pfn > zone->compact_cached_migrate_pfn) - zone->compact_cached_migrate_pfn = pfn; - } else { - if (!cc->finished_update_free && - pfn < zone->compact_cached_free_pfn) - zone->compact_cached_free_pfn = pfn; - } + pfn = page_to_pfn(page); + + /* Update where async and sync compaction should restart */ + if (migrate_scanner) { + if (cc->finished_update_migrate) + return; + if (pfn > zone->compact_cached_migrate_pfn[0]) + zone->compact_cached_migrate_pfn[0] = pfn; + if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1]) + zone->compact_cached_migrate_pfn[1] = pfn; + } else { + if (cc->finished_update_free) + return; + if (pfn < zone->compact_cached_free_pfn) + zone->compact_cached_free_pfn = pfn; } } #else @@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc, static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long nr_isolated, - bool migrate_scanner) + bool set_unsuitable, bool migrate_scanner) { } #endif /* CONFIG_COMPACTION */ @@ -323,7 +336,8 @@ isolate_fail: /* Update the pageblock-skip if the whole pageblock was scanned */ if (blockpfn == end_pfn) - update_pageblock_skip(cc, valid_page, total_isolated, false); + update_pageblock_skip(cc, valid_page, total_isolated, true, + false); count_compact_events(COMPACTFREE_SCANNED, nr_scanned); if (total_isolated) @@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, unsigned long flags; bool locked = false; struct page *page = NULL, *valid_page = NULL; - bool skipped_async_unsuitable = false; + bool set_unsuitable = true; const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) | (unevictable ? ISOLATE_UNEVICTABLE : 0); @@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, */ mt = get_pageblock_migratetype(page); if (!cc->sync && !migrate_async_suitable(mt)) { - cc->finished_update_migrate = true; - skipped_async_unsuitable = true; + set_unsuitable = false; goto next_pageblock; } } @@ -640,11 +653,10 @@ next_pageblock: /* * Update the pageblock-skip information and cached scanner pfn, * if the whole pageblock was scanned without isolating any page. - * This is not done when pageblock was skipped due to being unsuitable - * for async compaction, so that eventual sync compaction can try. */ - if (low_pfn == end_pfn && !skipped_async_unsuitable) - update_pageblock_skip(cc, valid_page, nr_isolated, true); + if (low_pfn == end_pfn) + update_pageblock_skip(cc, valid_page, nr_isolated, + set_unsuitable, true); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); @@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone, /* Compaction run completes if the migrate and free scanner meet */ if (cc->free_pfn <= cc->migrate_pfn) { /* Let the next compaction start anew. */ - zone->compact_cached_migrate_pfn = zone->zone_start_pfn; + zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; + zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; zone->compact_cached_free_pfn = zone_end_pfn(zone); /* @@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) * information on where the scanners should start but check that it * is initialised by ensuring the values are within zone boundaries. 
*/ - cc->migrate_pfn = zone->compact_cached_migrate_pfn; + cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync]; cc->free_pfn = zone->compact_cached_free_pfn; if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); @@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { cc->migrate_pfn = start_pfn; - zone->compact_cached_migrate_pfn = cc->migrate_pfn; + zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; + zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); -- cgit v1.2.3 From e0b9daeb453e602a95ea43853dc12d385558ce1f Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 4 Jun 2014 16:08:28 -0700 Subject: mm, compaction: embed migration mode in compact_control We're going to want to manipulate the migration mode for compaction in the page allocator, and currently compact_control's sync field is only a bool. Currently, we only do MIGRATE_ASYNC or MIGRATE_SYNC_LIGHT compaction depending on the value of this bool. Convert the bool to enum migrate_mode and pass the migration mode in directly. Later, we'll want to avoid MIGRATE_SYNC_LIGHT for thp allocations in the pagefault patch to avoid unnecessary latency. This also alters compaction triggered from sysfs, either for the entire system or for a node, to force MIGRATE_SYNC. [akpm@linux-foundation.org: fix build] [iamjoonsoo.kim@lge.com: use MIGRATE_SYNC in alloc_contig_range()] Signed-off-by: David Rientjes Suggested-by: Mel Gorman Acked-by: Vlastimil Babka Cc: Greg Thelen Cc: Naoya Horiguchi Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 4 ++-- mm/compaction.c | 36 +++++++++++++++++++----------------- mm/internal.h | 2 +- mm/page_alloc.c | 39 +++++++++++++++++---------------------- 4 files changed, 39 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 7e1c76e3cd68..01e3132820da 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync, bool *contended); + enum migrate_mode mode, bool *contended); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -91,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended) + enum migrate_mode mode, bool *contended) { return COMPACT_CONTINUE; } diff --git a/mm/compaction.c b/mm/compaction.c index 70c0f8cda33f..217a6ad9a20e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -161,7 +161,8 @@ static void update_pageblock_skip(struct compact_control *cc, return; if (pfn > zone->compact_cached_migrate_pfn[0]) zone->compact_cached_migrate_pfn[0] = pfn; - if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1]) + if (cc->mode != MIGRATE_ASYNC && + pfn > zone->compact_cached_migrate_pfn[1]) 
zone->compact_cached_migrate_pfn[1] = pfn; } else { if (cc->finished_update_free) @@ -208,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, } /* async aborts if taking too long or contended */ - if (!cc->sync) { + if (cc->mode == MIGRATE_ASYNC) { cc->contended = true; return false; } @@ -473,7 +474,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, bool locked = false; struct page *page = NULL, *valid_page = NULL; bool set_unsuitable = true; - const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) | + const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ? + ISOLATE_ASYNC_MIGRATE : 0) | (unevictable ? ISOLATE_UNEVICTABLE : 0); /* @@ -483,7 +485,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, */ while (unlikely(too_many_isolated(zone))) { /* async migration should just abort */ - if (!cc->sync) + if (cc->mode == MIGRATE_ASYNC) return 0; congestion_wait(BLK_RW_ASYNC, HZ/10); @@ -548,7 +550,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, * the minimum amount of work satisfies the allocation */ mt = get_pageblock_migratetype(page); - if (!cc->sync && !migrate_async_suitable(mt)) { + if (cc->mode == MIGRATE_ASYNC && + !migrate_async_suitable(mt)) { set_unsuitable = false; goto next_pageblock; } @@ -981,6 +984,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) int ret; unsigned long start_pfn = zone->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(zone); + const bool sync = cc->mode != MIGRATE_ASYNC; ret = compaction_suitable(zone, cc->order); switch (ret) { @@ -1006,7 +1010,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) * information on where the scanners should start but check that it * is initialised by ensuring the values are within zone boundaries. */ - cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync]; + cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; cc->free_pfn = zone->compact_cached_free_pfn; if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); @@ -1040,8 +1044,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) nr_migrate = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, - compaction_free, (unsigned long)cc, - cc->sync ? 
MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC, + compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION); update_nr_listpages(cc); nr_remaining = cc->nr_migratepages; @@ -1074,9 +1077,8 @@ out: return ret; } -static unsigned long compact_zone_order(struct zone *zone, - int order, gfp_t gfp_mask, - bool sync, bool *contended) +static unsigned long compact_zone_order(struct zone *zone, int order, + gfp_t gfp_mask, enum migrate_mode mode, bool *contended) { unsigned long ret; struct compact_control cc = { @@ -1085,7 +1087,7 @@ static unsigned long compact_zone_order(struct zone *zone, .order = order, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, - .sync = sync, + .mode = mode, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -1107,7 +1109,7 @@ int sysctl_extfrag_threshold = 500; * @order: The order of the current allocation * @gfp_mask: The GFP mask of the current allocation * @nodemask: The allowed nodes to allocate from - * @sync: Whether migration is synchronous or not + * @mode: The migration mode for async, sync light, or sync migration * @contended: Return value that is true if compaction was aborted due to lock contention * @page: Optionally capture a free page of the requested order during compaction * @@ -1115,7 +1117,7 @@ int sysctl_extfrag_threshold = 500; */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended) + enum migrate_mode mode, bool *contended) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -1140,7 +1142,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - status = compact_zone_order(zone, order, gfp_mask, sync, + status = compact_zone_order(zone, order, gfp_mask, mode, contended); rc = max(status, rc); @@ -1190,7 +1192,7 @@ void compact_pgdat(pg_data_t *pgdat, int order) { struct compact_control cc = { .order = order, - .sync = false, + .mode = MIGRATE_ASYNC, }; if (!order) @@ -1203,7 +1205,7 @@ static void compact_node(int nid) { struct compact_control cc = { .order = -1, - .sync = true, + .mode = MIGRATE_SYNC, .ignore_skip_hint = true, }; diff --git a/mm/internal.h b/mm/internal.h index 6ee580d69ddd..a25424a24e0c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -134,7 +134,7 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ - bool sync; /* Synchronous migration */ + enum migrate_mode mode; /* Async or sync migration mode */ bool ignore_skip_hint; /* Scan blocks even if marked skip */ bool finished_update_free; /* True when the zone cached pfns are * no longer being updated diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 027d0294413a..afb29da0576c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2217,7 +2217,7 @@ static struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int migratetype, bool sync_migration, + int migratetype, enum migrate_mode mode, bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { @@ -2231,7 +2231,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, - nodemask, 
sync_migration, + nodemask, mode, contended_compaction); current->flags &= ~PF_MEMALLOC; @@ -2264,7 +2264,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, * As async compaction considers a subset of pageblocks, only * defer if the failure was a sync compaction failure. */ - if (sync_migration) + if (mode != MIGRATE_ASYNC) defer_compaction(preferred_zone, order); cond_resched(); @@ -2277,9 +2277,8 @@ static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int migratetype, bool sync_migration, - bool *contended_compaction, bool *deferred_compaction, - unsigned long *did_some_progress) + int migratetype, enum migrate_mode mode, bool *contended_compaction, + bool *deferred_compaction, unsigned long *did_some_progress) { return NULL; } @@ -2474,7 +2473,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int alloc_flags; unsigned long pages_reclaimed = 0; unsigned long did_some_progress; - bool sync_migration = false; + enum migrate_mode migration_mode = MIGRATE_ASYNC; bool deferred_compaction = false; bool contended_compaction = false; @@ -2568,17 +2567,15 @@ rebalance: * Try direct compaction. The first pass is asynchronous. Subsequent * attempts after direct reclaim are synchronous */ - page = __alloc_pages_direct_compact(gfp_mask, order, - zonelist, high_zoneidx, - nodemask, - alloc_flags, preferred_zone, - migratetype, sync_migration, - &contended_compaction, + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, + high_zoneidx, nodemask, alloc_flags, + preferred_zone, migratetype, + migration_mode, &contended_compaction, &deferred_compaction, &did_some_progress); if (page) goto got_pg; - sync_migration = true; + migration_mode = MIGRATE_SYNC_LIGHT; /* * If compaction is deferred for high-order allocations, it is because @@ -2653,12 +2650,10 @@ rebalance: * direct reclaim and reclaim/compaction depends on compaction * being called after reclaim so call directly if necessary */ - page = __alloc_pages_direct_compact(gfp_mask, order, - zonelist, high_zoneidx, - nodemask, - alloc_flags, preferred_zone, - migratetype, sync_migration, - &contended_compaction, + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, + high_zoneidx, nodemask, alloc_flags, + preferred_zone, migratetype, + migration_mode, &contended_compaction, &deferred_compaction, &did_some_progress); if (page) @@ -6218,7 +6213,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, cc->nr_migratepages -= nr_reclaimed; ret = migrate_pages(&cc->migratepages, alloc_migrate_target, - NULL, 0, MIGRATE_SYNC, MR_CMA); + NULL, 0, cc->mode, MR_CMA); } if (ret < 0) { putback_movable_pages(&cc->migratepages); @@ -6257,7 +6252,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(start)), - .sync = true, + .mode = MIGRATE_SYNC, .ignore_skip_hint = true, }; INIT_LIST_HEAD(&cc.migratepages); -- cgit v1.2.3 From adfab836f4908deb049a5128082719e689eed964 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Wed, 4 Jun 2014 16:09:53 -0700 Subject: swap: change swap_info singly-linked list to list_head The logic controlling the singly-linked list of swap_info_struct entries for all active, i.e. 
swapon'ed, swap targets is rather complex, because: - it stores the entries in priority order - there is a pointer to the highest priority entry - there is a pointer to the highest priority not-full entry - there is a highest_priority_index variable set outside the swap_lock - swap entries of equal priority should be used equally this complexity leads to bugs such as: https://lkml.org/lkml/2014/2/13/181 where different priority swap targets are incorrectly used equally. That bug probably could be solved with the existing singly-linked lists, but I think it would only add more complexity to the already difficult to understand get_swap_page() swap_list iteration logic. The first patch changes from a singly-linked list to a doubly-linked list using list_heads; the highest_priority_index and related code are removed and get_swap_page() starts each iteration at the highest priority swap_info entry, even if it's full. While this does introduce unnecessary list iteration (i.e. Schlemiel the painter's algorithm) in the case where one or more of the highest priority entries are full, the iteration and manipulation code is much simpler and behaves correctly re: the above bug; and the fourth patch removes the unnecessary iteration. The second patch adds some minor plist helper functions; nothing new really, just functions to match existing regular list functions. These are used by the next two patches. The third patch adds plist_requeue(), which is used by get_swap_page() in the next patch - it performs the requeueing of same-priority entries (which moves the entry to the end of its priority in the plist), so that all equal-priority swap_info_structs get used equally. The fourth patch converts the main list into a plist, and adds a new plist that contains only swap_info entries that are both active and not full. As Mel suggested using plists allows removing all the ordering code from swap - plists handle ordering automatically. The list naming is also clarified now that there are two lists, with the original list changed from swap_list_head to swap_active_head and the new list named swap_avail_head. A new spinlock is also added for the new list, so swap_info entries can be added or removed from the new list immediately as they become full or not full. This patch (of 4): Replace the singly-linked list tracking active, i.e. swapon'ed, swap_info_struct entries with a doubly-linked list using struct list_heads. Simplify the logic iterating and manipulating the list of entries, especially get_swap_page(), by using standard list_head functions, and removing the highest priority iteration logic. The change fixes the bug: https://lkml.org/lkml/2014/2/13/181 in which different priority swap entries after the highest priority entry are incorrectly used equally in pairs. The swap behavior is now as advertised, i.e. different priority swap entries are used in order, and equal priority swap targets are used concurrently. 
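The core insertion pattern the series switches to, shown in isolation (a sketch only; struct item stands in for swap_info_struct, and the walk mirrors the new priority-ordered insert in _enable_swap_info() in the diff below):

#include <linux/list.h>

struct item {
	int prio;
	struct list_head list;
};

static LIST_HEAD(item_head);		/* kept ordered, highest prio first */

static void insert_by_prio(struct item *new)
{
	struct item *it;

	/* walk from the highest-priority entry and link the new entry
	 * in front of the first one with a lower priority */
	list_for_each_entry(it, &item_head, list) {
		if (new->prio >= it->prio) {
			list_add_tail(&new->list, &it->list);
			return;
		}
	}
	/* lower priority than all existing entries, or the list is empty */
	list_add_tail(&new->list, &item_head);
}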
Signed-off-by: Dan Streetman Acked-by: Mel Gorman Cc: Shaohua Li Cc: Hugh Dickins Cc: Dan Streetman Cc: Michal Hocko Cc: Christian Ehrhardt Cc: Weijie Yang Cc: Rik van Riel Cc: Johannes Weiner Cc: Bob Liu Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Paul Gortmaker Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 7 +- include/linux/swapfile.h | 2 +- mm/frontswap.c | 13 ++-- mm/swapfile.c | 171 ++++++++++++++++++++--------------------------- 4 files changed, 78 insertions(+), 115 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 5a14b928164e..8bb85d6d65f0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -214,8 +214,8 @@ struct percpu_cluster { struct swap_info_struct { unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ + struct list_head list; /* entry in swap list */ signed char type; /* strange name for an index */ - signed char next; /* next type on the swap list */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */ @@ -255,11 +255,6 @@ struct swap_info_struct { struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */ }; -struct swap_list_t { - int head; /* head of priority-ordered swapfile list */ - int next; /* swapfile to be used next */ -}; - /* linux/mm/workingset.c */ void *workingset_eviction(struct address_space *mapping, struct page *page); bool workingset_refault(void *shadow); diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index e282624e8c10..2eab382d593d 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -6,7 +6,7 @@ * want to expose them to the dozens of source files that include swap.h */ extern spinlock_t swap_lock; -extern struct swap_list_t swap_list; +extern struct list_head swap_list_head; extern struct swap_info_struct *swap_info[]; extern int try_to_unuse(unsigned int, bool, unsigned long); diff --git a/mm/frontswap.c b/mm/frontswap.c index 1b24bdcb3197..fae11602e8a9 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -327,15 +327,12 @@ EXPORT_SYMBOL(__frontswap_invalidate_area); static unsigned long __frontswap_curr_pages(void) { - int type; unsigned long totalpages = 0; struct swap_info_struct *si = NULL; assert_spin_locked(&swap_lock); - for (type = swap_list.head; type >= 0; type = si->next) { - si = swap_info[type]; + list_for_each_entry(si, &swap_list_head, list) totalpages += atomic_read(&si->frontswap_pages); - } return totalpages; } @@ -347,11 +344,9 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused, int si_frontswap_pages; unsigned long total_pages_to_unuse = total; unsigned long pages = 0, pages_to_unuse = 0; - int type; assert_spin_locked(&swap_lock); - for (type = swap_list.head; type >= 0; type = si->next) { - si = swap_info[type]; + list_for_each_entry(si, &swap_list_head, list) { si_frontswap_pages = atomic_read(&si->frontswap_pages); if (total_pages_to_unuse < si_frontswap_pages) { pages = pages_to_unuse = total_pages_to_unuse; @@ -366,7 +361,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused, } vm_unacct_memory(pages); *unused = pages_to_unuse; - *swapid = type; + *swapid = si->type; ret = 0; break; } @@ -413,7 +408,7 @@ void frontswap_shrink(unsigned long target_pages) /* * we don't want to hold swap_lock while doing a 
very * lengthy try_to_unuse, but swap_list may change - * so restart scan from swap_list.head each time + * so restart scan from swap_list_head each time */ spin_lock(&swap_lock); ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type); diff --git a/mm/swapfile.c b/mm/swapfile.c index 4a7f7e6992b6..6c95a8c63b1a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -51,14 +51,17 @@ atomic_long_t nr_swap_pages; /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */ long total_swap_pages; static int least_priority; -static atomic_t highest_priority_index = ATOMIC_INIT(-1); static const char Bad_file[] = "Bad swap file entry "; static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; -struct swap_list_t swap_list = {-1, -1}; +/* + * all active swap_info_structs + * protected with swap_lock, and ordered by priority. + */ +LIST_HEAD(swap_list_head); struct swap_info_struct *swap_info[MAX_SWAPFILES]; @@ -640,66 +643,54 @@ no_page: swp_entry_t get_swap_page(void) { - struct swap_info_struct *si; + struct swap_info_struct *si, *next; pgoff_t offset; - int type, next; - int wrapped = 0; - int hp_index; + struct list_head *tmp; spin_lock(&swap_lock); if (atomic_long_read(&nr_swap_pages) <= 0) goto noswap; atomic_long_dec(&nr_swap_pages); - for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { - hp_index = atomic_xchg(&highest_priority_index, -1); - /* - * highest_priority_index records current highest priority swap - * type which just frees swap entries. If its priority is - * higher than that of swap_list.next swap type, we use it. It - * isn't protected by swap_lock, so it can be an invalid value - * if the corresponding swap type is swapoff. We double check - * the flags here. It's even possible the swap type is swapoff - * and swapon again and its priority is changed. In such rare - * case, low prority swap type might be used, but eventually - * high priority swap will be used after several rounds of - * swap. 
- */ - if (hp_index != -1 && hp_index != type && - swap_info[type]->prio < swap_info[hp_index]->prio && - (swap_info[hp_index]->flags & SWP_WRITEOK)) { - type = hp_index; - swap_list.next = type; - } - - si = swap_info[type]; - next = si->next; - if (next < 0 || - (!wrapped && si->prio != swap_info[next]->prio)) { - next = swap_list.head; - wrapped++; - } - + list_for_each(tmp, &swap_list_head) { + si = list_entry(tmp, typeof(*si), list); spin_lock(&si->lock); - if (!si->highest_bit) { - spin_unlock(&si->lock); - continue; - } - if (!(si->flags & SWP_WRITEOK)) { + if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { spin_unlock(&si->lock); continue; } - swap_list.next = next; + /* + * rotate the current swap_info that we're going to use + * to after any other swap_info that have the same prio, + * so that all equal-priority swap_info get used equally + */ + next = si; + list_for_each_entry_continue(next, &swap_list_head, list) { + if (si->prio != next->prio) + break; + list_rotate_left(&si->list); + next = si; + } spin_unlock(&swap_lock); /* This is called for allocating swap entry for cache */ offset = scan_swap_map(si, SWAP_HAS_CACHE); spin_unlock(&si->lock); if (offset) - return swp_entry(type, offset); + return swp_entry(si->type, offset); spin_lock(&swap_lock); - next = swap_list.next; + /* + * if we got here, it's likely that si was almost full before, + * and since scan_swap_map() can drop the si->lock, multiple + * callers probably all tried to get a page from the same si + * and it filled up before we could get one. So we need to + * try again. Since we dropped the swap_lock, there may now + * be non-full higher priority swap_infos, and this si may have + * even been removed from the list (although very unlikely). + * Let's start over. + */ + tmp = &swap_list_head; } atomic_long_inc(&nr_swap_pages); @@ -766,27 +757,6 @@ out: return NULL; } -/* - * This swap type frees swap entry, check if it is the highest priority swap - * type which just frees swap entry. get_swap_page() uses - * highest_priority_index to search highest priority swap type. The - * swap_info_struct.lock can't protect us if there are multiple swap types - * active, so we use atomic_cmpxchg. 
- */ -static void set_highest_priority_index(int type) -{ - int old_hp_index, new_hp_index; - - do { - old_hp_index = atomic_read(&highest_priority_index); - if (old_hp_index != -1 && - swap_info[old_hp_index]->prio >= swap_info[type]->prio) - break; - new_hp_index = type; - } while (atomic_cmpxchg(&highest_priority_index, - old_hp_index, new_hp_index) != old_hp_index); -} - static unsigned char swap_entry_free(struct swap_info_struct *p, swp_entry_t entry, unsigned char usage) { @@ -830,7 +800,6 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, p->lowest_bit = offset; if (offset > p->highest_bit) p->highest_bit = offset; - set_highest_priority_index(p->type); atomic_long_inc(&nr_swap_pages); p->inuse_pages--; frontswap_invalidate_page(p->type, offset); @@ -1765,7 +1734,7 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info) { - int i, prev; + struct swap_info_struct *si; if (prio >= 0) p->prio = prio; @@ -1777,18 +1746,28 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio, atomic_long_add(p->pages, &nr_swap_pages); total_swap_pages += p->pages; - /* insert swap space into swap_list: */ - prev = -1; - for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { - if (p->prio >= swap_info[i]->prio) - break; - prev = i; + assert_spin_locked(&swap_lock); + BUG_ON(!list_empty(&p->list)); + /* + * insert into swap list; the list is in priority order, + * so that get_swap_page() can get a page from the highest + * priority swap_info_struct with available page(s), and + * swapoff can adjust the auto-assigned (i.e. negative) prio + * values for any lower-priority swap_info_structs when + * removing a negative-prio swap_info_struct + */ + list_for_each_entry(si, &swap_list_head, list) { + if (p->prio >= si->prio) { + list_add_tail(&p->list, &si->list); + return; + } } - p->next = i; - if (prev < 0) - swap_list.head = swap_list.next = p->type; - else - swap_info[prev]->next = p->type; + /* + * this covers two cases: + * 1) p->prio is less than all existing prio + * 2) the swap list is empty + */ + list_add_tail(&p->list, &swap_list_head); } static void enable_swap_info(struct swap_info_struct *p, int prio, @@ -1823,8 +1802,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) struct address_space *mapping; struct inode *inode; struct filename *pathname; - int i, type, prev; - int err; + int err, found = 0; unsigned int old_block_size; if (!capable(CAP_SYS_ADMIN)) @@ -1842,17 +1820,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) goto out; mapping = victim->f_mapping; - prev = -1; spin_lock(&swap_lock); - for (type = swap_list.head; type >= 0; type = swap_info[type]->next) { - p = swap_info[type]; + list_for_each_entry(p, &swap_list_head, list) { if (p->flags & SWP_WRITEOK) { - if (p->swap_file->f_mapping == mapping) + if (p->swap_file->f_mapping == mapping) { + found = 1; break; + } } - prev = type; } - if (type < 0) { + if (!found) { err = -EINVAL; spin_unlock(&swap_lock); goto out_dput; @@ -1864,20 +1841,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); goto out_dput; } - if (prev < 0) - swap_list.head = p->next; - else - swap_info[prev]->next = p->next; - if (type == swap_list.next) { - /* just pick something that's safe... 
*/ - swap_list.next = swap_list.head; - } spin_lock(&p->lock); if (p->prio < 0) { - for (i = p->next; i >= 0; i = swap_info[i]->next) - swap_info[i]->prio = p->prio--; + struct swap_info_struct *si = p; + + list_for_each_entry_continue(si, &swap_list_head, list) { + si->prio++; + } least_priority++; } + list_del_init(&p->list); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; p->flags &= ~SWP_WRITEOK; @@ -1885,7 +1858,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); set_current_oom_origin(); - err = try_to_unuse(type, false, 0); /* force all pages to be unused */ + err = try_to_unuse(p->type, false, 0); /* force unuse all pages */ clear_current_oom_origin(); if (err) { @@ -1926,7 +1899,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) frontswap_map = frontswap_map_get(p); spin_unlock(&p->lock); spin_unlock(&swap_lock); - frontswap_invalidate_area(type); + frontswap_invalidate_area(p->type); frontswap_map_set(p, NULL); mutex_unlock(&swapon_mutex); free_percpu(p->percpu_cluster); @@ -1935,7 +1908,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) vfree(cluster_info); vfree(frontswap_map); /* Destroy swap account information */ - swap_cgroup_swapoff(type); + swap_cgroup_swapoff(p->type); inode = mapping->host; if (S_ISBLK(inode->i_mode)) { @@ -2142,8 +2115,8 @@ static struct swap_info_struct *alloc_swap_info(void) */ } INIT_LIST_HEAD(&p->first_swap_extent.list); + INIT_LIST_HEAD(&p->list); p->flags = SWP_USED; - p->next = -1; spin_unlock(&swap_lock); spin_lock_init(&p->lock); -- cgit v1.2.3 From fd16618e12a05df79a3439d72d5ffdac5d34f3da Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Wed, 4 Jun 2014 16:09:55 -0700 Subject: lib/plist: add helper functions Add PLIST_HEAD() to plist.h, equivalent to LIST_HEAD() from list.h, to define and initialize a struct plist_head. Add plist_for_each_continue() and plist_for_each_entry_continue(), equivalent to list_for_each_continue() and list_for_each_entry_continue(), to iterate over a plist continuing after the current position. Add plist_prev() and plist_next(), equivalent to (struct list_head*)->prev and ->next, implemented by list_prev_entry() and list_next_entry(), to access the prev/next struct plist_node entry. These are needed because unlike struct list_head, direct access of the prev/next struct plist_node isn't possible; the list must be navigated via the contained struct list_head. e.g. instead of accessing the prev by list_prev_entry(node, node_list) it can be accessed by plist_prev(node). 
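A toy usage sketch of the new helpers (struct job and its fields are hypothetical, not taken from the patch):

#include <linux/kernel.h>
#include <linux/plist.h>
#include <linux/printk.h>

struct job {
	struct plist_node node;
	int id;
};

static PLIST_HEAD(job_head);		/* the new PLIST_HEAD() initializer */

/* print every entry that follows @start, in priority order */
static void dump_after(struct job *start)
{
	struct job *j = start;

	plist_for_each_entry_continue(j, &job_head, node)
		pr_info("job %d, prio %d\n", j->id, j->node.prio);
}

/* the new plist_next() hands back the following plist_node directly;
 * the caller must know that @j is not the last entry */
static struct job *job_after(struct job *j)
{
	return container_of(plist_next(&j->node), struct job, node);
}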
Signed-off-by: Dan Streetman Acked-by: Mel Gorman Cc: Paul Gortmaker Cc: Steven Rostedt Cc: Thomas Gleixner Cc: Shaohua Li Cc: Hugh Dickins Cc: Dan Streetman Cc: Michal Hocko Cc: Christian Ehrhardt Cc: Weijie Yang Cc: Rik van Riel Cc: Johannes Weiner Cc: Bob Liu Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/plist.h | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'include/linux') diff --git a/include/linux/plist.h b/include/linux/plist.h index aa0fb390bd29..c81549119bd4 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -97,6 +97,13 @@ struct plist_node { .node_list = LIST_HEAD_INIT((head).node_list) \ } +/** + * PLIST_HEAD - declare and init plist_head + * @head: name for struct plist_head variable + */ +#define PLIST_HEAD(head) \ + struct plist_head head = PLIST_HEAD_INIT(head) + /** * PLIST_NODE_INIT - static struct plist_node initializer * @node: struct plist_node variable name @@ -142,6 +149,16 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); #define plist_for_each(pos, head) \ list_for_each_entry(pos, &(head)->node_list, node_list) +/** + * plist_for_each_continue - continue iteration over the plist + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * + * Continue to iterate over plist, continuing after the current position. + */ +#define plist_for_each_continue(pos, head) \ + list_for_each_entry_continue(pos, &(head)->node_list, node_list) + /** * plist_for_each_safe - iterate safely over a plist of given type * @pos: the type * to use as a loop counter @@ -162,6 +179,18 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.node_list) +/** + * plist_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * @m: the name of the list_struct within the struct + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define plist_for_each_entry_continue(pos, head, m) \ + list_for_each_entry_continue(pos, &(head)->node_list, m.node_list) + /** * plist_for_each_entry_safe - iterate safely over list of given type * @pos: the type * to use as a loop counter @@ -228,6 +257,20 @@ static inline int plist_node_empty(const struct plist_node *node) container_of(plist_last(head), type, member) #endif +/** + * plist_next - get the next entry in list + * @pos: the type * to cursor + */ +#define plist_next(pos) \ + list_next_entry(pos, node_list) + +/** + * plist_prev - get the prev entry in list + * @pos: the type * to cursor + */ +#define plist_prev(pos) \ + list_prev_entry(pos, node_list) + /** * plist_first - return the first node (and thus, highest priority) * @head: the &struct plist_head pointer -- cgit v1.2.3 From a75f232ce0fe38bd01301899ecd97ffd0254316a Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Wed, 4 Jun 2014 16:09:57 -0700 Subject: lib/plist: add plist_requeue Add plist_requeue(), which moves the specified plist_node after all other same-priority plist_nodes in the list. This is essentially an optimized plist_del() followed by plist_add(). This is needed by swap, which (with the next patch in this set) uses a plist of available swap devices. 
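Conceptually the operation is just that delete/re-add, because plist_add() places a node behind any entries that already have the same priority; a naive sketch for illustration only (the real implementation in this patch avoids the full re-add when the node is already last in its priority group):

static void plist_requeue_naive(struct plist_node *node, struct plist_head *head)
{
	/* plist_add() inserts after all existing same-prio entries, so
	 * del + add moves @node to the tail of its priority group */
	plist_del(node, head);
	plist_add(node, head);
}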
When a swap device (either a swap partition or swap file) is added to the system with swapon(), the device is added to a plist, ordered by the swap device's priority. When swap needs to allocate a page from one of the swap devices, it takes the page from the first swap device on the plist, which is the highest priority swap device. The swap device is left in the plist until all its pages are used, and then removed from the plist when it becomes full. However, as described in man 2 swapon, swap must allocate pages from swap devices with the same priority in round-robin order; to do this, on each swap page allocation, swap uses a page from the first swap device in the plist, and then calls plist_requeue() to move that swap device entry to after any other same-priority swap devices. The next swap page allocation will again use a page from the first swap device in the plist and requeue it, and so on, resulting in round-robin usage of equal-priority swap devices. Also add plist_test_requeue() test function, for use by plist_test() to test the plist_requeue() function. Signed-off-by: Dan Streetman Cc: Steven Rostedt Cc: Peter Zijlstra Acked-by: Mel Gorman Cc: Paul Gortmaker Cc: Thomas Gleixner Cc: Shaohua Li Cc: Hugh Dickins Cc: Dan Streetman Cc: Michal Hocko Cc: Christian Ehrhardt Cc: Weijie Yang Cc: Rik van Riel Cc: Johannes Weiner Cc: Bob Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/plist.h | 2 ++ lib/plist.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'include/linux') diff --git a/include/linux/plist.h b/include/linux/plist.h index c81549119bd4..8b6c970cff6c 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -141,6 +141,8 @@ static inline void plist_node_init(struct plist_node *node, int prio) extern void plist_add(struct plist_node *node, struct plist_head *head); extern void plist_del(struct plist_node *node, struct plist_head *head); +extern void plist_requeue(struct plist_node *node, struct plist_head *head); + /** * plist_for_each - iterate over the plist * @pos: the type * to use as a loop counter diff --git a/lib/plist.c b/lib/plist.c index 1ebc95f7a46f..0f2084d30798 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head) plist_check_head(head); } +/** + * plist_requeue - Requeue @node at end of same-prio entries. + * + * This is essentially an optimized plist_del() followed by + * plist_add(). It moves an entry already in the plist to + * after any other same-priority entries.
+ * + * @node: &struct plist_node pointer - entry to be moved + * @head: &struct plist_head pointer - list head + */ +void plist_requeue(struct plist_node *node, struct plist_head *head) +{ + struct plist_node *iter; + struct list_head *node_next = &head->node_list; + + plist_check_head(head); + BUG_ON(plist_head_empty(head)); + BUG_ON(plist_node_empty(node)); + + if (node == plist_last(head)) + return; + + iter = plist_next(node); + + if (node->prio != iter->prio) + return; + + plist_del(node, head); + + plist_for_each_continue(iter, head) { + if (node->prio != iter->prio) { + node_next = &iter->node_list; + break; + } + } + list_add_tail(&node->node_list, node_next); + + plist_check_head(head); +} + #ifdef CONFIG_DEBUG_PI_LIST #include #include @@ -170,6 +210,14 @@ static void __init plist_test_check(int nr_expect) BUG_ON(prio_pos->prio_list.next != &first->prio_list); } +static void __init plist_test_requeue(struct plist_node *node) +{ + plist_requeue(node, &test_head); + + if (node != plist_last(&test_head)) + BUG_ON(node->prio == plist_next(node)->prio); +} + static int __init plist_test(void) { int nr_expect = 0, i, loop; @@ -193,6 +241,10 @@ static int __init plist_test(void) nr_expect--; } plist_test_check(nr_expect); + if (!plist_node_empty(test_node + i)) { + plist_test_requeue(test_node + i); + plist_test_check(nr_expect); + } } for (i = 0; i < ARRAY_SIZE(test_node); i++) { -- cgit v1.2.3 From 18ab4d4ced0817421e6db6940374cc39d28d65da Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Wed, 4 Jun 2014 16:09:59 -0700 Subject: swap: change swap_list_head to plist, add swap_avail_head Originally get_swap_page() started iterating through the singly-linked list of swap_info_structs using swap_list.next or highest_priority_index, which both were intended to point to the highest priority active swap target that was not full. The first patch in this series changed the singly-linked list to a doubly-linked list, and removed the logic to start at the highest priority non-full entry; it starts scanning at the highest priority entry each time, even if the entry is full. Replace the manually ordered swap_list_head with a plist, swap_active_head. Add a new plist, swap_avail_head. The original swap_active_head plist contains all active swap_info_structs, as before, while the new swap_avail_head plist contains only swap_info_structs that are active and available, i.e. not full. Add a new spinlock, swap_avail_lock, to protect the swap_avail_head list. Mel Gorman suggested using plists since they internally handle ordering the list entries based on priority, which is exactly what swap was doing manually. All the ordering code is now removed, and swap_info_struct entries are simply added to their corresponding plist and automatically ordered correctly. Using a new plist for available swap_info_structs simplifies and optimizes get_swap_page(), which no longer has to iterate over full swap_info_structs. Using a new spinlock for swap_avail_head plist allows each swap_info_struct to add or remove themselves from the plist when they become full or not-full; previously they could not do so because the swap_info_struct->lock is held when they change from full<->not-full, and the swap_lock protecting the main swap_active_head must be ordered before any swap_info_struct->lock.
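Condensed from the swap_entry_free() hunk in this patch (the helper name here is invented; the patch open-codes the equivalent in mm/swapfile.c), the full to not-full transition only needs the device's own lock plus the new swap_avail_lock:

static void swap_mark_available(struct swap_info_struct *si)
{
	/* called with si->lock held; taking swap_lock here would invert
	 * the required swap_lock -> si->lock ordering, which is why the
	 * avail list is protected by its own swap_avail_lock */
	spin_lock(&swap_avail_lock);
	if (plist_node_empty(&si->avail_list))
		plist_add(&si->avail_list, &swap_avail_head);
	spin_unlock(&swap_avail_lock);
}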
Signed-off-by: Dan Streetman Acked-by: Mel Gorman Cc: Shaohua Li Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Dan Streetman Cc: Michal Hocko Cc: Christian Ehrhardt Cc: Weijie Yang Cc: Rik van Riel Cc: Johannes Weiner Cc: Bob Liu Cc: Paul Gortmaker Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 3 +- include/linux/swapfile.h | 2 +- mm/frontswap.c | 6 +- mm/swapfile.c | 145 +++++++++++++++++++++++++++++------------------ 4 files changed, 97 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 8bb85d6d65f0..9155bcdcce12 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -214,7 +214,8 @@ struct percpu_cluster { struct swap_info_struct { unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ - struct list_head list; /* entry in swap list */ + struct plist_node list; /* entry in swap_active_head */ + struct plist_node avail_list; /* entry in swap_avail_head */ signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index 2eab382d593d..388293a91e8c 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -6,7 +6,7 @@ * want to expose them to the dozens of source files that include swap.h */ extern spinlock_t swap_lock; -extern struct list_head swap_list_head; +extern struct plist_head swap_active_head; extern struct swap_info_struct *swap_info[]; extern int try_to_unuse(unsigned int, bool, unsigned long); diff --git a/mm/frontswap.c b/mm/frontswap.c index fae11602e8a9..c30eec536f03 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -331,7 +331,7 @@ static unsigned long __frontswap_curr_pages(void) struct swap_info_struct *si = NULL; assert_spin_locked(&swap_lock); - list_for_each_entry(si, &swap_list_head, list) + plist_for_each_entry(si, &swap_active_head, list) totalpages += atomic_read(&si->frontswap_pages); return totalpages; } @@ -346,7 +346,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused, unsigned long pages = 0, pages_to_unuse = 0; assert_spin_locked(&swap_lock); - list_for_each_entry(si, &swap_list_head, list) { + plist_for_each_entry(si, &swap_active_head, list) { si_frontswap_pages = atomic_read(&si->frontswap_pages); if (total_pages_to_unuse < si_frontswap_pages) { pages = pages_to_unuse = total_pages_to_unuse; @@ -408,7 +408,7 @@ void frontswap_shrink(unsigned long target_pages) /* * we don't want to hold swap_lock while doing a very * lengthy try_to_unuse, but swap_list may change - * so restart scan from swap_list_head each time + * so restart scan from swap_active_head each time */ spin_lock(&swap_lock); ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type); diff --git a/mm/swapfile.c b/mm/swapfile.c index 6c95a8c63b1a..beeeef8a1b2d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -61,7 +61,22 @@ static const char Unused_offset[] = "Unused swap offset entry "; * all active swap_info_structs * protected with swap_lock, and ordered by priority. */ -LIST_HEAD(swap_list_head); +PLIST_HEAD(swap_active_head); + +/* + * all available (active, not full) swap_info_structs + * protected with swap_avail_lock, ordered by priority. 
+ * This is used by get_swap_page() instead of swap_active_head + * because swap_active_head includes all swap_info_structs, + * but get_swap_page() doesn't need to look at full ones. + * This uses its own lock instead of swap_lock because when a + * swap_info_struct changes between not-full/full, it needs to + * add/remove itself to/from this list, but the swap_info_struct->lock + * is held and the locking order requires swap_lock to be taken + * before any swap_info_struct->lock. + */ +static PLIST_HEAD(swap_avail_head); +static DEFINE_SPINLOCK(swap_avail_lock); struct swap_info_struct *swap_info[MAX_SWAPFILES]; @@ -594,6 +609,9 @@ checks: if (si->inuse_pages == si->pages) { si->lowest_bit = si->max; si->highest_bit = 0; + spin_lock(&swap_avail_lock); + plist_del(&si->avail_list, &swap_avail_head); + spin_unlock(&swap_avail_lock); } si->swap_map[offset] = usage; inc_cluster_info_page(si, si->cluster_info, offset); @@ -645,57 +663,63 @@ swp_entry_t get_swap_page(void) { struct swap_info_struct *si, *next; pgoff_t offset; - struct list_head *tmp; - spin_lock(&swap_lock); if (atomic_long_read(&nr_swap_pages) <= 0) goto noswap; atomic_long_dec(&nr_swap_pages); - list_for_each(tmp, &swap_list_head) { - si = list_entry(tmp, typeof(*si), list); + spin_lock(&swap_avail_lock); + +start_over: + plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) { + /* requeue si to after same-priority siblings */ + plist_requeue(&si->avail_list, &swap_avail_head); + spin_unlock(&swap_avail_lock); spin_lock(&si->lock); if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { + spin_lock(&swap_avail_lock); + if (plist_node_empty(&si->avail_list)) { + spin_unlock(&si->lock); + goto nextsi; + } + WARN(!si->highest_bit, + "swap_info %d in list but !highest_bit\n", + si->type); + WARN(!(si->flags & SWP_WRITEOK), + "swap_info %d in list but !SWP_WRITEOK\n", + si->type); + plist_del(&si->avail_list, &swap_avail_head); spin_unlock(&si->lock); - continue; + goto nextsi; } - /* - * rotate the current swap_info that we're going to use - * to after any other swap_info that have the same prio, - * so that all equal-priority swap_info get used equally - */ - next = si; - list_for_each_entry_continue(next, &swap_list_head, list) { - if (si->prio != next->prio) - break; - list_rotate_left(&si->list); - next = si; - } - - spin_unlock(&swap_lock); /* This is called for allocating swap entry for cache */ offset = scan_swap_map(si, SWAP_HAS_CACHE); spin_unlock(&si->lock); if (offset) return swp_entry(si->type, offset); - spin_lock(&swap_lock); + pr_debug("scan_swap_map of si %d failed to find offset\n", + si->type); + spin_lock(&swap_avail_lock); +nextsi: /* * if we got here, it's likely that si was almost full before, * and since scan_swap_map() can drop the si->lock, multiple * callers probably all tried to get a page from the same si - * and it filled up before we could get one. So we need to - * try again. Since we dropped the swap_lock, there may now - * be non-full higher priority swap_infos, and this si may have - * even been removed from the list (although very unlikely). - * Let's start over. + * and it filled up before we could get one; or, the si filled + * up between us dropping swap_avail_lock and taking si->lock. + * Since we dropped the swap_avail_lock, the swap_avail_head + * list may have been modified; so if next is still in the + * swap_avail_head list then try it, otherwise start over. 
*/ - tmp = &swap_list_head; + if (plist_node_empty(&next->avail_list)) + goto start_over; } + spin_unlock(&swap_avail_lock); + atomic_long_inc(&nr_swap_pages); noswap: - spin_unlock(&swap_lock); return (swp_entry_t) {0}; } @@ -798,8 +822,18 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, dec_cluster_info_page(p, p->cluster_info, offset); if (offset < p->lowest_bit) p->lowest_bit = offset; - if (offset > p->highest_bit) + if (offset > p->highest_bit) { + bool was_full = !p->highest_bit; p->highest_bit = offset; + if (was_full && (p->flags & SWP_WRITEOK)) { + spin_lock(&swap_avail_lock); + WARN_ON(!plist_node_empty(&p->avail_list)); + if (plist_node_empty(&p->avail_list)) + plist_add(&p->avail_list, + &swap_avail_head); + spin_unlock(&swap_avail_lock); + } + } atomic_long_inc(&nr_swap_pages); p->inuse_pages--; frontswap_invalidate_page(p->type, offset); @@ -1734,12 +1768,16 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info) { - struct swap_info_struct *si; - if (prio >= 0) p->prio = prio; else p->prio = --least_priority; + /* + * the plist prio is negated because plist ordering is + * low-to-high, while swap ordering is high-to-low + */ + p->list.prio = -p->prio; + p->avail_list.prio = -p->prio; p->swap_map = swap_map; p->cluster_info = cluster_info; p->flags |= SWP_WRITEOK; @@ -1747,27 +1785,20 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio, total_swap_pages += p->pages; assert_spin_locked(&swap_lock); - BUG_ON(!list_empty(&p->list)); - /* - * insert into swap list; the list is in priority order, - * so that get_swap_page() can get a page from the highest - * priority swap_info_struct with available page(s), and - * swapoff can adjust the auto-assigned (i.e. negative) prio - * values for any lower-priority swap_info_structs when - * removing a negative-prio swap_info_struct - */ - list_for_each_entry(si, &swap_list_head, list) { - if (p->prio >= si->prio) { - list_add_tail(&p->list, &si->list); - return; - } - } /* - * this covers two cases: - * 1) p->prio is less than all existing prio - * 2) the swap list is empty + * both lists are plists, and thus priority ordered. + * swap_active_head needs to be priority ordered for swapoff(), + * which on removal of any swap_info_struct with an auto-assigned + * (i.e. negative) priority increments the auto-assigned priority + * of any lower-priority swap_info_structs. + * swap_avail_head needs to be priority ordered for get_swap_page(), + * which allocates swap pages from the highest available priority + * swap_info_struct. 
*/ - list_add_tail(&p->list, &swap_list_head); + plist_add(&p->list, &swap_active_head); + spin_lock(&swap_avail_lock); + plist_add(&p->avail_list, &swap_avail_head); + spin_unlock(&swap_avail_lock); } static void enable_swap_info(struct swap_info_struct *p, int prio, @@ -1821,7 +1852,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mapping = victim->f_mapping; spin_lock(&swap_lock); - list_for_each_entry(p, &swap_list_head, list) { + plist_for_each_entry(p, &swap_active_head, list) { if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) { found = 1; @@ -1841,16 +1872,21 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); goto out_dput; } + spin_lock(&swap_avail_lock); + plist_del(&p->avail_list, &swap_avail_head); + spin_unlock(&swap_avail_lock); spin_lock(&p->lock); if (p->prio < 0) { struct swap_info_struct *si = p; - list_for_each_entry_continue(si, &swap_list_head, list) { + plist_for_each_entry_continue(si, &swap_active_head, list) { si->prio++; + si->list.prio--; + si->avail_list.prio--; } least_priority++; } - list_del_init(&p->list); + plist_del(&p->list, &swap_active_head); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; p->flags &= ~SWP_WRITEOK; @@ -2115,7 +2151,8 @@ static struct swap_info_struct *alloc_swap_info(void) */ } INIT_LIST_HEAD(&p->first_swap_extent.list); - INIT_LIST_HEAD(&p->list); + plist_node_init(&p->list, 0); + plist_node_init(&p->avail_list, 0); p->flags = SWP_USED; spin_unlock(&swap_lock); spin_lock_init(&p->lock); -- cgit v1.2.3 From 776ed0f0377914d1e65fed903c052e9eef3f4cc3 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 4 Jun 2014 16:10:02 -0700 Subject: memcg: cleanup kmem cache creation/destruction functions naming Current names are rather inconsistent. Let's try to improve them. Brief change log: ** old name ** ** new name ** kmem_cache_create_memcg memcg_create_kmem_cache memcg_kmem_create_cache memcg_regsiter_cache memcg_kmem_destroy_cache memcg_unregister_cache kmem_cache_destroy_memcg_children memcg_cleanup_cache_params mem_cgroup_destroy_all_caches memcg_unregister_all_caches create_work memcg_register_cache_work memcg_create_cache_work_func memcg_register_cache_func memcg_create_cache_enqueue memcg_schedule_register_cache Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 +- include/linux/slab.h | 2 +- mm/memcontrol.c | 60 ++++++++++++++++++++++------------------------ mm/slab_common.c | 12 +++++----- 4 files changed, 36 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index dfc2929a3877..eb65d29516ca 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -505,7 +505,7 @@ __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); -int __kmem_cache_destroy_memcg_children(struct kmem_cache *s); +int __memcg_cleanup_cache_params(struct kmem_cache *s); /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. 
diff --git a/include/linux/slab.h b/include/linux/slab.h index 86e5b26fbdab..1d9abb7d22a0 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -116,7 +116,7 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *)); #ifdef CONFIG_MEMCG_KMEM -struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *, +struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *, const char *); #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5e2bfcc96da9..d176edb1d5e8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3132,8 +3132,8 @@ void memcg_free_cache_params(struct kmem_cache *s) kfree(s->memcg_params); } -static void memcg_kmem_create_cache(struct mem_cgroup *memcg, - struct kmem_cache *root_cache) +static void memcg_register_cache(struct mem_cgroup *memcg, + struct kmem_cache *root_cache) { static char memcg_name_buf[NAME_MAX + 1]; /* protected by memcg_slab_mutex */ @@ -3153,7 +3153,7 @@ static void memcg_kmem_create_cache(struct mem_cgroup *memcg, return; cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1); - cachep = kmem_cache_create_memcg(memcg, root_cache, memcg_name_buf); + cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf); /* * If we could not create a memcg cache, do not complain, because * that's not critical at all as we can always proceed with the root @@ -3175,7 +3175,7 @@ static void memcg_kmem_create_cache(struct mem_cgroup *memcg, root_cache->memcg_params->memcg_caches[id] = cachep; } -static void memcg_kmem_destroy_cache(struct kmem_cache *cachep) +static void memcg_unregister_cache(struct kmem_cache *cachep) { struct kmem_cache *root_cache; struct mem_cgroup *memcg; @@ -3228,7 +3228,7 @@ static inline void memcg_resume_kmem_account(void) current->memcg_kmem_skip_account--; } -int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) +int __memcg_cleanup_cache_params(struct kmem_cache *s) { struct kmem_cache *c; int i, failed = 0; @@ -3239,7 +3239,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) if (!c) continue; - memcg_kmem_destroy_cache(c); + memcg_unregister_cache(c); if (cache_from_memcg_idx(s, i)) failed++; @@ -3248,7 +3248,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s) return failed; } -static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) +static void memcg_unregister_all_caches(struct mem_cgroup *memcg) { struct kmem_cache *cachep; struct memcg_cache_params *params, *tmp; @@ -3261,25 +3261,26 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) cachep = memcg_params_to_cache(params); kmem_cache_shrink(cachep); if (atomic_read(&cachep->memcg_params->nr_pages) == 0) - memcg_kmem_destroy_cache(cachep); + memcg_unregister_cache(cachep); } mutex_unlock(&memcg_slab_mutex); } -struct create_work { +struct memcg_register_cache_work { struct mem_cgroup *memcg; struct kmem_cache *cachep; struct work_struct work; }; -static void memcg_create_cache_work_func(struct work_struct *w) +static void memcg_register_cache_func(struct work_struct *w) { - struct create_work *cw = container_of(w, struct create_work, work); + struct memcg_register_cache_work *cw = + container_of(w, struct memcg_register_cache_work, work); struct mem_cgroup *memcg = cw->memcg; struct kmem_cache *cachep = cw->cachep; mutex_lock(&memcg_slab_mutex); - memcg_kmem_create_cache(memcg, cachep); + memcg_register_cache(memcg, cachep); mutex_unlock(&memcg_slab_mutex); css_put(&memcg->css); @@ -3289,12 +3290,12 @@ static void 
memcg_create_cache_work_func(struct work_struct *w) /* * Enqueue the creation of a per-memcg kmem_cache. */ -static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, - struct kmem_cache *cachep) +static void __memcg_schedule_register_cache(struct mem_cgroup *memcg, + struct kmem_cache *cachep) { - struct create_work *cw; + struct memcg_register_cache_work *cw; - cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); + cw = kmalloc(sizeof(*cw), GFP_NOWAIT); if (cw == NULL) { css_put(&memcg->css); return; @@ -3303,17 +3304,17 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, cw->memcg = memcg; cw->cachep = cachep; - INIT_WORK(&cw->work, memcg_create_cache_work_func); + INIT_WORK(&cw->work, memcg_register_cache_func); schedule_work(&cw->work); } -static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, - struct kmem_cache *cachep) +static void memcg_schedule_register_cache(struct mem_cgroup *memcg, + struct kmem_cache *cachep) { /* * We need to stop accounting when we kmalloc, because if the * corresponding kmalloc cache is not yet created, the first allocation - * in __memcg_create_cache_enqueue will recurse. + * in __memcg_schedule_register_cache will recurse. * * However, it is better to enclose the whole function. Depending on * the debugging options enabled, INIT_WORK(), for instance, can @@ -3322,7 +3323,7 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, * the safest choice is to do it like this, wrapping the whole function. */ memcg_stop_kmem_account(); - __memcg_create_cache_enqueue(memcg, cachep); + __memcg_schedule_register_cache(memcg, cachep); memcg_resume_kmem_account(); } @@ -3393,16 +3394,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, * * However, there are some clashes that can arrive from locking. * For instance, because we acquire the slab_mutex while doing - * kmem_cache_dup, this means no further allocation could happen - * with the slab_mutex held. - * - * Also, because cache creation issue get_online_cpus(), this - * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex, - * that ends up reversed during cpu hotplug. (cpuset allocates - * a bunch of GFP_KERNEL memory during cpuup). Due to all that, - * better to defer everything. + * memcg_create_kmem_cache, this means no further allocation + * could happen with the slab_mutex held. So it's better to + * defer everything. */ - memcg_create_cache_enqueue(memcg, cachep); + memcg_schedule_register_cache(memcg, cachep); return cachep; out: rcu_read_unlock(); @@ -3526,7 +3522,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) memcg_uncharge_kmem(memcg, PAGE_SIZE << order); } #else -static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) +static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) { } #endif /* CONFIG_MEMCG_KMEM */ @@ -6372,7 +6368,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) css_for_each_descendant_post(iter, css) mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); - mem_cgroup_destroy_all_caches(memcg); + memcg_unregister_all_caches(memcg); vmpressure_cleanup(&memcg->vmpressure); } diff --git a/mm/slab_common.c b/mm/slab_common.c index 32175617cb75..48fafb61f35e 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -261,7 +261,7 @@ EXPORT_SYMBOL(kmem_cache_create); #ifdef CONFIG_MEMCG_KMEM /* - * kmem_cache_create_memcg - Create a cache for a memory cgroup. + * memcg_create_kmem_cache - Create a cache for a memory cgroup. 
* @memcg: The memory cgroup the new cache is for. * @root_cache: The parent of the new cache. * @memcg_name: The name of the memory cgroup (used for naming the new cache). @@ -270,7 +270,7 @@ EXPORT_SYMBOL(kmem_cache_create); * requests going from @memcg to @root_cache. The new cache inherits properties * from its parent. */ -struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *memcg, +struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache, const char *memcg_name) { @@ -305,7 +305,7 @@ out_unlock: return s; } -static int kmem_cache_destroy_memcg_children(struct kmem_cache *s) +static int memcg_cleanup_cache_params(struct kmem_cache *s) { int rc; @@ -314,13 +314,13 @@ static int kmem_cache_destroy_memcg_children(struct kmem_cache *s) return 0; mutex_unlock(&slab_mutex); - rc = __kmem_cache_destroy_memcg_children(s); + rc = __memcg_cleanup_cache_params(s); mutex_lock(&slab_mutex); return rc; } #else -static int kmem_cache_destroy_memcg_children(struct kmem_cache *s) +static int memcg_cleanup_cache_params(struct kmem_cache *s) { return 0; } @@ -343,7 +343,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; - if (kmem_cache_destroy_memcg_children(s) != 0) + if (memcg_cleanup_cache_params(s) != 0) goto out_unlock; list_del(&s->list); -- cgit v1.2.3 From ea5e9539abf1258f23e725cb9cb25aa74efa29eb Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:07 -0700 Subject: include/linux/jump_label.h: expose the reference count This patch exposes the jump_label reference count in preparation for the next patch. cpusets cares about both the jump_label being enabled and how many users of the cpusets there currently are. Signed-off-by: Peter Zijlstra Signed-off-by: Mel Gorman Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. 
McKenney" Cc: Oleg Nesterov Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/jump_label.h | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 5c1dfb2a9e73..784304b222b3 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -69,6 +69,10 @@ struct static_key { # include # define HAVE_JUMP_LABEL +#else +struct static_key { + atomic_t enabled; +}; #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ enum jump_label_type { @@ -79,6 +83,12 @@ enum jump_label_type { struct module; #include + +static inline int static_key_count(struct static_key *key) +{ + return atomic_read(&key->enabled); +} + #ifdef HAVE_JUMP_LABEL #define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL @@ -134,10 +144,6 @@ extern void jump_label_apply_nops(struct module *mod); #else /* !HAVE_JUMP_LABEL */ -struct static_key { - atomic_t enabled; -}; - static __always_inline void jump_label_init(void) { static_key_initialized = true; @@ -145,14 +151,14 @@ static __always_inline void jump_label_init(void) static __always_inline bool static_key_false(struct static_key *key) { - if (unlikely(atomic_read(&key->enabled) > 0)) + if (unlikely(static_key_count(key) > 0)) return true; return false; } static __always_inline bool static_key_true(struct static_key *key) { - if (likely(atomic_read(&key->enabled) > 0)) + if (likely(static_key_count(key) > 0)) return true; return false; } @@ -194,7 +200,7 @@ static inline int jump_label_apply_nops(struct module *mod) static inline bool static_key_enabled(struct static_key *key) { - return (atomic_read(&key->enabled) > 0); + return static_key_count(key) > 0; } #endif /* _LINUX_JUMP_LABEL_H */ -- cgit v1.2.3 From 664eeddeef6539247691197c1ac124d4aa872ab6 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:08 -0700 Subject: mm: page_alloc: use jump labels to avoid checking number_of_cpusets If cpusets are not in use then we still check a global variable on every page allocation. Use jump labels to avoid the overhead. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. McKenney" Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpuset.h | 29 ++++++++++++++++++++++++++--- kernel/cpuset.c | 14 ++++---------- mm/page_alloc.c | 3 ++- 3 files changed, 32 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index b19d3dc2e651..ade2390ffe92 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -12,10 +12,31 @@ #include #include #include +#include #ifdef CONFIG_CPUSETS -extern int number_of_cpusets; /* How many cpusets are defined in system? 
*/ +extern struct static_key cpusets_enabled_key; +static inline bool cpusets_enabled(void) +{ + return static_key_false(&cpusets_enabled_key); +} + +static inline int nr_cpusets(void) +{ + /* jump label reference count + the top-level cpuset */ + return static_key_count(&cpusets_enabled_key) + 1; +} + +static inline void cpuset_inc(void) +{ + static_key_slow_inc(&cpusets_enabled_key); +} + +static inline void cpuset_dec(void) +{ + static_key_slow_dec(&cpusets_enabled_key); +} extern int cpuset_init(void); extern void cpuset_init_smp(void); @@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) { - return number_of_cpusets <= 1 || + return nr_cpusets() <= 1 || __cpuset_node_allowed_softwall(node, gfp_mask); } static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) { - return number_of_cpusets <= 1 || + return nr_cpusets() <= 1 || __cpuset_node_allowed_hardwall(node, gfp_mask); } @@ -124,6 +145,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) #else /* !CONFIG_CPUSETS */ +static inline bool cpusets_enabled(void) { return false; } + static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 3d54c418bd06..130017843899 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -61,12 +61,7 @@ #include #include -/* - * Tracks how many cpusets are currently defined in system. - * When there is only one cpuset (the root cpuset) we can - * short circuit some hooks. - */ -int number_of_cpusets __read_mostly; +struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE; /* See "Frequency meter" comments, below. */ @@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains, goto done; } - csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); + csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); if (!csa) goto done; csn = 0; @@ -1888,7 +1883,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) if (is_spread_slab(parent)) set_bit(CS_SPREAD_SLAB, &cs->flags); - number_of_cpusets++; + cpuset_inc(); if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; @@ -1939,7 +1934,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) if (is_sched_load_balance(cs)) update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); - number_of_cpusets--; + cpuset_dec(); clear_bit(CS_ONLINE, &cs->flags); mutex_unlock(&cpuset_mutex); @@ -1992,7 +1987,6 @@ int __init cpuset_init(void) if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) BUG(); - number_of_cpusets = 1; return 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b4381eaee715..a2955e101715 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1921,7 +1921,8 @@ zonelist_scan: if (IS_ENABLED(CONFIG_NUMA) && zlc_active && !zlc_zone_worth_trying(zonelist, z, allowednodes)) continue; - if ((alloc_flags & ALLOC_CPUSET) && + if (cpusets_enabled() && + (alloc_flags & ALLOC_CPUSET) && !cpuset_zone_allowed_softwall(zone, gfp_mask)) continue; BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); -- cgit v1.2.3 From e58469bafd0524e848c3733bc3918d854595e20f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:16 -0700 Subject: mm: page_alloc: use word-based accesses for get/set pageblock bitmaps The test_bit operations in get/set pageblock flags are expensive. This patch reads the bitmap on a word basis and use shifts and masks to isolate the bits of interest. 
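The read side boils down to one load plus a shift and a mask; a condensed model of it (the helper name here is invented, and the real accessor below additionally locates the right word in the zone's pageblock bitmap first):

static unsigned long word_read_flags(unsigned long word, unsigned long bitidx,
				     unsigned long end_bitidx, unsigned long mask)
{
	/* bitidx is the pageblock's bit offset within 'word'; a single
	 * shift plus mask replaces the old loop of test_bit() calls */
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}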
Similarly masks are used to set a local copy of the bitmap and then use cmpxchg to update the bitmap if there have been no other changes made in parallel. In a test running dd onto tmpfs the overhead of the pageblock-related functions went from 1.27% in profiles to 0.5%. In addition to the performance benefits, this patch closes races that are possible between: a) get_ and set_pageblock_migratetype(), where get_pageblock_migratetype() reads part of the bits before and other part of the bits after set_pageblock_migratetype() has updated them. b) set_pageblock_migratetype() and set_pageblock_skip(), where the non-atomic read-modify-update set bit operation in set_pageblock_skip() will cause lost updates to some bits changed in the set_pageblock_migratetype(). Joonsoo Kim first reported the case a) via code inspection. Vlastimil Babka's testing with a debug patch showed that either a) or b) occurs roughly once per mmtests' stress-highalloc benchmark (although not necessarily in the same pageblock). Furthermore during development of unrelated compaction patches, it was observed that frequent calls to {start,undo}_isolate_page_range() the race occurs several thousands of times and has resulted in NULL pointer dereferences in move_freepages() and free_one_page() in places where free_list[migratetype] is manipulated by e.g. list_move(). Further debugging confirmed that migratetype had invalid value of 6, causing out of bounds access to the free_list array. That confirmed that the race exist, although it may be extremely rare, and currently only fatal where page isolation is performed due to memory hot remove. Races on pageblocks being updated by set_pageblock_migratetype(), where both old and new migratetype are lower MIGRATE_RESERVE, currently cannot result in an invalid value being observed, although theoretically they may still lead to unexpected creation or destruction of MIGRATE_RESERVE pageblocks. Furthermore, things could get suddenly worse when memory isolation is used more, or when new migratetypes are added. After this patch, the race has no longer been observed in testing. Signed-off-by: Mel Gorman Acked-by: Vlastimil Babka Reported-by: Joonsoo Kim Reported-and-tested-by: Vlastimil Babka Cc: Johannes Weiner Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. 
McKenney" Cc: Oleg Nesterov Cc: Rik van Riel Cc: Peter Zijlstra Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 6 ++++- include/linux/pageblock-flags.h | 37 ++++++++++++++++++++++++----- mm/page_alloc.c | 52 +++++++++++++++++++++++++---------------- 3 files changed, 68 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 10a96ee68311..8ef1e3f71e0f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -75,9 +75,13 @@ enum { extern int page_group_by_mobility_disabled; +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) + static inline int get_pageblock_migratetype(struct page *page) { - return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); + return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK); } struct free_area { diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 2ee8cd2466b5..c08730c10c7a 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -30,9 +30,12 @@ enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + 3 - 1, /* 3 bits required for migrate types */ -#ifdef CONFIG_COMPACTION PB_migrate_skip,/* If set the block is skipped by compaction */ -#endif /* CONFIG_COMPACTION */ + + /* + * Assume the bits will always align on a word. If this assumption + * changes then get/set pageblock needs updating. + */ NR_PAGEBLOCK_BITS }; @@ -62,11 +65,33 @@ extern int pageblock_order; /* Forward declaration */ struct page; +unsigned long get_pageblock_flags_mask(struct page *page, + unsigned long end_bitidx, + unsigned long mask); +void set_pageblock_flags_mask(struct page *page, + unsigned long flags, + unsigned long end_bitidx, + unsigned long mask); + /* Declarations for getting and setting flags. 
See mm/page_alloc.c */ -unsigned long get_pageblock_flags_group(struct page *page, - int start_bitidx, int end_bitidx); -void set_pageblock_flags_group(struct page *page, unsigned long flags, - int start_bitidx, int end_bitidx); +static inline unsigned long get_pageblock_flags_group(struct page *page, + int start_bitidx, int end_bitidx) +{ + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; + unsigned long mask = (1 << nr_flag_bits) - 1; + + return get_pageblock_flags_mask(page, end_bitidx, mask); +} + +static inline void set_pageblock_flags_group(struct page *page, + unsigned long flags, + int start_bitidx, int end_bitidx) +{ + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; + unsigned long mask = (1 << nr_flag_bits) - 1; + + set_pageblock_flags_mask(page, flags, end_bitidx, mask); +} #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 485932c577e7..6e937809c87a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6028,53 +6028,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) * @end_bitidx: The last bit of interest * returns pageblock_bits flags */ -unsigned long get_pageblock_flags_group(struct page *page, - int start_bitidx, int end_bitidx) +unsigned long get_pageblock_flags_mask(struct page *page, + unsigned long end_bitidx, + unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx; - unsigned long flags = 0; - unsigned long value = 1; + unsigned long pfn, bitidx, word_bitidx; + unsigned long word; zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; + bitidx &= (BITS_PER_LONG-1); - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) - if (test_bit(bitidx + start_bitidx, bitmap)) - flags |= value; - - return flags; + word = bitmap[word_bitidx]; + bitidx += end_bitidx; + return (word >> (BITS_PER_LONG - bitidx - 1)) & mask; } /** - * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages + * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest * @end_bitidx: The last bit of interest * @flags: The flags to set */ -void set_pageblock_flags_group(struct page *page, unsigned long flags, - int start_bitidx, int end_bitidx) +void set_pageblock_flags_mask(struct page *page, unsigned long flags, + unsigned long end_bitidx, + unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx; - unsigned long value = 1; + unsigned long pfn, bitidx, word_bitidx; + unsigned long old_word, word; + + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; + bitidx &= (BITS_PER_LONG-1); + VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) - if (flags & value) - __set_bit(bitidx + start_bitidx, bitmap); - else - __clear_bit(bitidx + start_bitidx, bitmap); + bitidx += end_bitidx; + mask <<= (BITS_PER_LONG - bitidx - 1); + flags <<= (BITS_PER_LONG - bitidx - 1); + + word = ACCESS_ONCE(bitmap[word_bitidx]); + for (;;) { + old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); + if (word == old_word) + break; 
+ word = old_word; + } } /* -- cgit v1.2.3 From dc4b0caff24d9b2918e9f27bc65499ee63187eba Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:17 -0700 Subject: mm: page_alloc: reduce number of times page_to_pfn is called In the free path we calculate page_to_pfn multiple times. Reduce that. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: Johannes Weiner Acked-by: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. McKenney" Cc: Oleg Nesterov Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 9 +++++++-- include/linux/pageblock-flags.h | 33 +++++++++++++-------------------- mm/page_alloc.c | 34 +++++++++++++++++++--------------- 3 files changed, 39 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8ef1e3f71e0f..472426ac96ae 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled; #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) -static inline int get_pageblock_migratetype(struct page *page) +#define get_pageblock_migratetype(page) \ + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + PB_migrate_end, MIGRATETYPE_MASK) + +static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) { BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); - return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK); + return get_pfnblock_flags_mask(page, pfn, PB_migrate_end, + MIGRATETYPE_MASK); } struct free_area { diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index c08730c10c7a..2baeee12f48e 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -65,33 +65,26 @@ extern int pageblock_order; /* Forward declaration */ struct page; -unsigned long get_pageblock_flags_mask(struct page *page, +unsigned long get_pfnblock_flags_mask(struct page *page, + unsigned long pfn, unsigned long end_bitidx, unsigned long mask); -void set_pageblock_flags_mask(struct page *page, + +void set_pfnblock_flags_mask(struct page *page, unsigned long flags, + unsigned long pfn, unsigned long end_bitidx, unsigned long mask); /* Declarations for getting and setting flags. 
See mm/page_alloc.c */ -static inline unsigned long get_pageblock_flags_group(struct page *page, - int start_bitidx, int end_bitidx) -{ - unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; - unsigned long mask = (1 << nr_flag_bits) - 1; - - return get_pageblock_flags_mask(page, end_bitidx, mask); -} - -static inline void set_pageblock_flags_group(struct page *page, - unsigned long flags, - int start_bitidx, int end_bitidx) -{ - unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; - unsigned long mask = (1 << nr_flag_bits) - 1; - - set_pageblock_flags_mask(page, flags, end_bitidx, mask); -} +#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) +#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ + set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6e937809c87a..6cadc8678e28 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -560,6 +560,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, */ static inline void __free_one_page(struct page *page, + unsigned long pfn, struct zone *zone, unsigned int order, int migratetype) { @@ -576,7 +577,7 @@ static inline void __free_one_page(struct page *page, VM_BUG_ON(migratetype == -1); - page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); + page_idx = pfn & ((1 << MAX_ORDER) - 1); VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); @@ -711,7 +712,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, list_del(&page->lru); mt = get_freepage_migratetype(page); /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ - __free_one_page(page, zone, 0, mt); + __free_one_page(page, page_to_pfn(page), zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); if (likely(!is_migrate_isolate_page(page))) { __mod_zone_page_state(zone, NR_FREE_PAGES, 1); @@ -723,13 +724,15 @@ static void free_pcppages_bulk(struct zone *zone, int count, spin_unlock(&zone->lock); } -static void free_one_page(struct zone *zone, struct page *page, int order, +static void free_one_page(struct zone *zone, + struct page *page, unsigned long pfn, + int order, int migratetype) { spin_lock(&zone->lock); zone->pages_scanned = 0; - __free_one_page(page, zone, order, migratetype); + __free_one_page(page, pfn, zone, order, migratetype); if (unlikely(!is_migrate_isolate(migratetype))) __mod_zone_freepage_state(zone, 1 << order, migratetype); spin_unlock(&zone->lock); @@ -766,15 +769,16 @@ static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; int migratetype; + unsigned long pfn = page_to_pfn(page); if (!free_pages_prepare(page, order)) return; local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); - migratetype = get_pageblock_migratetype(page); + migratetype = get_pfnblock_migratetype(page, pfn); set_freepage_migratetype(page, migratetype); - free_one_page(page_zone(page), page, order, migratetype); + free_one_page(page_zone(page), page, pfn, order, migratetype); local_irq_restore(flags); } @@ -1380,12 +1384,13 @@ void free_hot_cold_page(struct page *page, int cold) struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; unsigned long flags; + unsigned long pfn = page_to_pfn(page); int migratetype; if 
(!free_pages_prepare(page, 0)) return; - migratetype = get_pageblock_migratetype(page); + migratetype = get_pfnblock_migratetype(page, pfn); set_freepage_migratetype(page, migratetype); local_irq_save(flags); __count_vm_event(PGFREE); @@ -1399,7 +1404,7 @@ void free_hot_cold_page(struct page *page, int cold) */ if (migratetype >= MIGRATE_PCPTYPES) { if (unlikely(is_migrate_isolate(migratetype))) { - free_one_page(zone, page, 0, migratetype); + free_one_page(zone, page, pfn, 0, migratetype); goto out; } migratetype = MIGRATE_MOVABLE; @@ -6028,17 +6033,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) * @end_bitidx: The last bit of interest * returns pageblock_bits flags */ -unsigned long get_pageblock_flags_mask(struct page *page, +unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, unsigned long end_bitidx, unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx, word_bitidx; + unsigned long bitidx, word_bitidx; unsigned long word; zone = page_zone(page); - pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); word_bitidx = bitidx / BITS_PER_LONG; @@ -6050,25 +6054,25 @@ unsigned long get_pageblock_flags_mask(struct page *page, } /** - * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages + * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest * @end_bitidx: The last bit of interest * @flags: The flags to set */ -void set_pageblock_flags_mask(struct page *page, unsigned long flags, +void set_pfnblock_flags_mask(struct page *page, unsigned long flags, + unsigned long pfn, unsigned long end_bitidx, unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx, word_bitidx; + unsigned long bitidx, word_bitidx; unsigned long old_word, word; BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); zone = page_zone(page); - pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); word_bitidx = bitidx / BITS_PER_LONG; -- cgit v1.2.3 From 7aeb09f9104b760fc53c98cb7d20d06640baf9e6 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:21 -0700 Subject: mm: page_alloc: use unsigned int for order in more places X86 prefers the use of unsigned types for iterators and there is a tendency to mix whether a signed or unsigned type if used for page order. This converts a number of sites in mm/page_alloc.c to use unsigned int for order where possible. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. 
McKenney" Cc: Oleg Nesterov Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 8 ++++---- mm/page_alloc.c | 43 +++++++++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 472426ac96ae..6cbd1b6c3d20 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -817,10 +817,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) extern struct mutex zonelists_mutex; void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); -bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags); -bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags); +bool zone_watermark_ok(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx, int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx, int alloc_flags); enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ce4d3716214c..37ef1b87f1f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -409,7 +409,8 @@ static int destroy_compound_page(struct page *page, unsigned long order) return bad; } -static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) +static inline void prep_zero_page(struct page *page, unsigned int order, + gfp_t gfp_flags) { int i; @@ -453,7 +454,7 @@ static inline void set_page_guard_flag(struct page *page) { } static inline void clear_page_guard_flag(struct page *page) { } #endif -static inline void set_page_order(struct page *page, int order) +static inline void set_page_order(struct page *page, unsigned int order) { set_page_private(page, order); __SetPageBuddy(page); @@ -504,7 +505,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order) * For recording page's order, we use page_private(page). 
*/ static inline int page_is_buddy(struct page *page, struct page *buddy, - int order) + unsigned int order) { if (!pfn_valid_within(page_to_pfn(buddy))) return 0; @@ -726,7 +727,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, static void free_one_page(struct zone *zone, struct page *page, unsigned long pfn, - int order, + unsigned int order, int migratetype) { spin_lock(&zone->lock); @@ -897,7 +898,7 @@ static inline int check_new_page(struct page *page) return 0; } -static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) +static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags) { int i; @@ -1108,16 +1109,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page, /* Remove an element from the buddy allocator from the fallback list */ static inline struct page * -__rmqueue_fallback(struct zone *zone, int order, int start_migratetype) +__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) { struct free_area *area; - int current_order; + unsigned int current_order; struct page *page; int migratetype, new_type, i; /* Find the largest possible block of pages in the other list */ - for (current_order = MAX_ORDER-1; current_order >= order; - --current_order) { + for (current_order = MAX_ORDER-1; + current_order >= order && current_order <= MAX_ORDER-1; + --current_order) { for (i = 0;; i++) { migratetype = fallbacks[start_migratetype][i]; @@ -1345,7 +1347,7 @@ void mark_free_pages(struct zone *zone) { unsigned long pfn, max_zone_pfn; unsigned long flags; - int order, t; + unsigned int order, t; struct list_head *curr; if (zone_is_empty(zone)) @@ -1541,8 +1543,8 @@ int split_free_page(struct page *page) */ static inline struct page *buffered_rmqueue(struct zone *preferred_zone, - struct zone *zone, int order, gfp_t gfp_flags, - int migratetype) + struct zone *zone, unsigned int order, + gfp_t gfp_flags, int migratetype) { unsigned long flags; struct page *page; @@ -1691,8 +1693,9 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) * Return true if free pages are above 'mark'. This takes into account the order * of the allocation. 
*/ -static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags, long free_pages) +static bool __zone_watermark_ok(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx, int alloc_flags, + long free_pages) { /* free_pages my go negative - that's OK */ long min = mark; @@ -1726,15 +1729,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, return true; } -bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, +bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, int alloc_flags) { return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, zone_page_state(z, NR_FREE_PAGES)); } -bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags) +bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx, int alloc_flags) { long free_pages = zone_page_state(z, NR_FREE_PAGES); @@ -4121,7 +4124,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, static void __meminit zone_init_free_lists(struct zone *zone) { - int order, t; + unsigned int order, t; for_each_migratetype_order(order, t) { INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; @@ -6444,7 +6447,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { struct page *page; struct zone *zone; - int order, i; + unsigned int order, i; unsigned long pfn; unsigned long flags; /* find the first valid pfn */ @@ -6496,7 +6499,7 @@ bool is_free_buddy_page(struct page *page) struct zone *zone = page_zone(page); unsigned long pfn = page_to_pfn(page); unsigned long flags; - int order; + unsigned int order; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { -- cgit v1.2.3 From b745bc85f21ea707e4ea1a91948055fa3e72c77b Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:22 -0700 Subject: mm: page_alloc: convert hot/cold parameter and immediate callers to bool cold is a bool, make it one. Make the likely case the "if" part of the block instead of the else as according to the optimisation manual this is preferred. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. 
McKenney" Cc: Oleg Nesterov Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/tile/mm/homecache.c | 2 +- fs/fuse/dev.c | 2 +- include/linux/gfp.h | 4 ++-- include/linux/pagemap.h | 2 +- include/linux/swap.h | 2 +- mm/page_alloc.c | 20 ++++++++++---------- mm/swap.c | 4 ++-- mm/swap_state.c | 2 +- mm/vmscan.c | 6 +++--- 9 files changed, 22 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 004ba568d93f..33294fdc402e 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order) if (put_page_testzero(page)) { homecache_change_page_home(page, order, PAGE_HOME_HASH); if (order == 0) { - free_hot_cold_page(page, 0); + free_hot_cold_page(page, false); } else { init_page_count(page); __free_pages(page, order); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index aac71ce373e4..098f97bdcf1b 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1614,7 +1614,7 @@ out_finish: static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) { - release_pages(req->pages, req->num_pages, 0); + release_pages(req->pages, req->num_pages, false); } static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, diff --git a/include/linux/gfp.h b/include/linux/gfp.h index d382db71e300..454c99fdb79d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -371,8 +371,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); -extern void free_hot_cold_page(struct page *page, int cold); -extern void free_hot_cold_page_list(struct list_head *list, int cold); +extern void free_hot_cold_page(struct page *page, bool cold); +extern void free_hot_cold_page_list(struct list_head *list, bool cold); extern void __free_kmem_pages(struct page *page, unsigned int order); extern void free_kmem_pages(unsigned long addr, unsigned int order); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 718214c5584e..c16fb6d06e36 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -110,7 +110,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define page_cache_get(page) get_page(page) #define page_cache_release(page) put_page(page) -void release_pages(struct page **pages, int nr, int cold); +void release_pages(struct page **pages, int nr, bool cold); /* * speculatively take a reference to a page. 
diff --git a/include/linux/swap.h b/include/linux/swap.h index 9155bcdcce12..97cf16164c46 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -477,7 +477,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) #define free_page_and_swap_cache(page) \ page_cache_release(page) #define free_pages_and_swap_cache(pages, nr) \ - release_pages((pages), (nr), 0); + release_pages((pages), (nr), false); static inline void show_swap_cache_info(void) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 37ef1b87f1f3..09345ab7fb63 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1199,7 +1199,7 @@ retry_reserve: */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype, int cold) + int migratetype, bool cold) { int i; @@ -1218,7 +1218,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * merge IO requests if the physical pages are ordered * properly. */ - if (likely(cold == 0)) + if (likely(!cold)) list_add(&page->lru, list); else list_add_tail(&page->lru, list); @@ -1379,9 +1379,9 @@ void mark_free_pages(struct zone *zone) /* * Free a 0-order page - * cold == 1 ? free a cold page : free a hot page + * cold == true ? free a cold page : free a hot page */ -void free_hot_cold_page(struct page *page, int cold) +void free_hot_cold_page(struct page *page, bool cold) { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; @@ -1413,10 +1413,10 @@ void free_hot_cold_page(struct page *page, int cold) } pcp = &this_cpu_ptr(zone->pageset)->pcp; - if (cold) - list_add_tail(&page->lru, &pcp->lists[migratetype]); - else + if (!cold) list_add(&page->lru, &pcp->lists[migratetype]); + else + list_add_tail(&page->lru, &pcp->lists[migratetype]); pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = ACCESS_ONCE(pcp->batch); @@ -1431,7 +1431,7 @@ out: /* * Free a list of 0-order pages */ -void free_hot_cold_page_list(struct list_head *list, int cold) +void free_hot_cold_page_list(struct list_head *list, bool cold) { struct page *page, *next; @@ -1548,7 +1548,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, { unsigned long flags; struct page *page; - int cold = !!(gfp_flags & __GFP_COLD); + bool cold = ((gfp_flags & __GFP_COLD) != 0); again: if (likely(order == 0)) { @@ -2823,7 +2823,7 @@ void __free_pages(struct page *page, unsigned int order) { if (put_page_testzero(page)) { if (order == 0) - free_hot_cold_page(page, 0); + free_hot_cold_page(page, false); else __free_pages_ok(page, order); } diff --git a/mm/swap.c b/mm/swap.c index c8d6df556ce6..11ebb9714f49 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -67,7 +67,7 @@ static void __page_cache_release(struct page *page) static void __put_single_page(struct page *page) { __page_cache_release(page); - free_hot_cold_page(page, 0); + free_hot_cold_page(page, false); } static void __put_compound_page(struct page *page) @@ -860,7 +860,7 @@ void lru_add_drain_all(void) * grabbed the page via the LRU. If it did, give up: shrink_inactive_list() * will free it. 
*/ -void release_pages(struct page **pages, int nr, int cold) +void release_pages(struct page **pages, int nr, bool cold) { int i; LIST_HEAD(pages_to_free); diff --git a/mm/swap_state.c b/mm/swap_state.c index e76ace30d436..2972eee184a4 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr) for (i = 0; i < todo; i++) free_swap_cache(pagep[i]); - release_pages(pagep, todo, 0); + release_pages(pagep, todo, false); pagep += todo; nr -= todo; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 9253e188000f..494cd632178c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1121,7 +1121,7 @@ keep: VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); } - free_hot_cold_page_list(&free_pages, 1); + free_hot_cold_page_list(&free_pages, true); list_splice(&ret_pages, page_list); count_vm_events(PGACTIVATE, pgactivate); @@ -1532,7 +1532,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, spin_unlock_irq(&zone->lru_lock); - free_hot_cold_page_list(&page_list, 1); + free_hot_cold_page_list(&page_list, true); /* * If reclaim is isolating dirty pages under writeback, it implies @@ -1755,7 +1755,7 @@ static void shrink_active_list(unsigned long nr_to_scan, __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); - free_hot_cold_page_list(&l_hold, 1); + free_hot_cold_page_list(&l_hold, true); } #ifdef CONFIG_SWAP -- cgit v1.2.3 From 07a427884348d38a6fd56fa4d78249c407196650 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:24 -0700 Subject: mm: shmem: avoid atomic operation during shmem_getpage_gfp shmem_getpage_gfp uses an atomic operation to set the SwapBacked field before it's even added to the LRU or visible. This is unnecessary as what could it possible race against? Use an unlocked variant. Signed-off-by: Mel Gorman Acked-by: Johannes Weiner Acked-by: Rik van Riel Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. McKenney" Cc: Oleg Nesterov Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 1 + mm/shmem.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d1fe1a761047..4d4b39ab2341 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -208,6 +208,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) + __SETPAGEFLAG(SwapBacked, swapbacked) __PAGEFLAG(SlobFree, slob_free) diff --git a/mm/shmem.c b/mm/shmem.c index 9f70e02111c6..f47fb38c4889 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1132,7 +1132,7 @@ repeat: goto decused; } - SetPageSwapBacked(page); + __SetPageSwapBacked(page); __set_page_locked(page); error = mem_cgroup_charge_file(page, current->mm, gfp & GFP_RECLAIM_MASK); -- cgit v1.2.3 From 2457aec63745e235bcafb7ef312b182d8682f0fc Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 4 Jun 2014 16:10:31 -0700 Subject: mm: non-atomically mark page accessed during page cache allocation where possible aops->write_begin may allocate a new page and make it visible only to have mark_page_accessed called almost immediately after. 
Once the page is visible the atomic operations are necessary, which is noticeable overhead when writing to an in-memory filesystem like tmpfs but should also be noticeable with fast storage. The objective of the patch is to initialise the accessed information with non-atomic operations before the page is visible.

The bulk of filesystems directly or indirectly use grab_cache_page_write_begin or find_or_create_page for the initial allocation of a page cache page. This patch adds an init_page_accessed() helper which behaves like the first call to mark_page_accessed() but may be called before the page is visible and can be done non-atomically.

The primary APIs of concern in this case are the following and are used by most filesystems.

	find_get_page
	find_lock_page
	find_or_create_page
	grab_cache_page_nowait
	grab_cache_page_write_begin

All of them are very similar in detail, so the patch creates a core helper pagecache_get_page() which takes a flags parameter that affects its behavior, such as whether the page should be marked accessed or not. The old API is preserved but is now basically a set of thin wrappers around this core function. Each of the filesystems is then updated to avoid calling mark_page_accessed when it is known that the VM interfaces have already done the job.

There is a slight snag in that the timing of the mark_page_accessed() call has now changed, so in rare cases it's possible a page gets to the end of the LRU as PageReferenced whereas previously it might have been repromoted. This is expected to be rare but it's worth the filesystem people thinking about it in case they see a problem with the timing change. It is also the case that some filesystems may now be marking pages accessed that previously were not, but it makes sense that filesystems have consistent behaviour in this regard.

The test case used to evaluate this is a simple dd of a large file done multiple times, with the file deleted on each iteration. The size of the file is 1/10th of physical memory to avoid dirty page balancing. In the async case it is possible that the workload completes without even hitting the disk; the results are variable but highlight the impact of mark_page_accessed for async IO. The sync results are expected to be more stable. The exception is tmpfs, where the normal case is for the "IO" to not hit the disk.

The test machine was single socket and UMA to avoid any scheduling or NUMA artifacts. Throughput and wall times are presented for sync IO; only wall times are shown for async, as the granularity reported by dd and the variability make it unsuitable for comparison. As async results were variable due to writeback timings, I'm only reporting the maximum figures. The sync results were stable enough to make the mean and stddev uninteresting. The performance results are reported based on a run with no profiling. Profile data is based on a separate run with oprofile running.

async dd
                                 3.15.0-rc3            3.15.0-rc3
                                    vanilla           accessed-v2
ext3   Max elapsed      13.9900 (  0.00%)      11.5900 ( 17.16%)
tmpfs  Max elapsed       0.5100 (  0.00%)       0.4900 (  3.92%)
btrfs  Max elapsed      12.8100 (  0.00%)      12.7800 (  0.23%)
ext4   Max elapsed      18.6000 (  0.00%)      13.3400 ( 28.28%)
xfs    Max elapsed      12.5600 (  0.00%)       2.0900 ( 83.36%)

The XFS figure is a bit strange as it managed to avoid a worst case by sheer luck, but the average figures looked reasonable.
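As an editor-added aside (not part of the patch), the sketch below illustrates the consumer-side pattern the series converts filesystems to. The FGP_* flags and the pagecache_get_page() signature are taken from the diff further down; the example_lookup_*() helpers and the gfp choices are hypothetical.

	#include <linux/pagemap.h>
	#include <linux/swap.h>

	/* Old pattern: look the page up, then mark it accessed with a
	 * separate call that performs atomic operations on a page that is
	 * already visible in the page cache. */
	static struct page *example_lookup_old(struct address_space *mapping,
					       pgoff_t index)
	{
		struct page *page = find_lock_page(mapping, index);

		if (page)
			mark_page_accessed(page);
		return page;
	}

	/* New pattern: one call to the core helper. FGP_ACCESSED lets the
	 * helper mark an existing page accessed, and for a freshly
	 * allocated page it can use init_page_accessed() before the page
	 * becomes visible, avoiding the atomic operation. */
	static struct page *example_lookup_new(struct address_space *mapping,
					       pgoff_t index)
	{
		return pagecache_get_page(mapping, index,
					  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					  mapping_gfp_mask(mapping),
					  GFP_KERNEL);
	}

The benefit of routing lookups through the single helper is exactly the point made above: the accessed/referenced state of a newly allocated page can be initialised non-atomically before insertion, so the later mark_page_accessed() in the caller becomes unnecessary. The oprofile data below shows the resulting drop in mark_page_accessed samples.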
samples percentage ext3 86107 0.9783 vmlinux-3.15.0-rc4-vanilla mark_page_accessed ext3 23833 0.2710 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed ext3 5036 0.0573 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed ext4 64566 0.8961 vmlinux-3.15.0-rc4-vanilla mark_page_accessed ext4 5322 0.0713 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed ext4 2869 0.0384 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed xfs 62126 1.7675 vmlinux-3.15.0-rc4-vanilla mark_page_accessed xfs 1904 0.0554 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed xfs 103 0.0030 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed btrfs 10655 0.1338 vmlinux-3.15.0-rc4-vanilla mark_page_accessed btrfs 2020 0.0273 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed btrfs 587 0.0079 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed tmpfs 59562 3.2628 vmlinux-3.15.0-rc4-vanilla mark_page_accessed tmpfs 1210 0.0696 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed tmpfs 94 0.0054 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed [akpm@linux-foundation.org: don't run init_page_accessed() against an uninitialised pointer] Signed-off-by: Mel Gorman Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Jan Kara Cc: Michal Hocko Cc: Hugh Dickins Cc: Dave Hansen Cc: Theodore Ts'o Cc: "Paul E. McKenney" Cc: Oleg Nesterov Cc: Rik van Riel Cc: Peter Zijlstra Tested-by: Prabhakar Lad Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/extent_io.c | 11 +-- fs/btrfs/file.c | 5 +- fs/buffer.c | 7 +- fs/ext4/mballoc.c | 14 ++-- fs/f2fs/checkpoint.c | 3 - fs/f2fs/node.c | 2 - fs/fuse/file.c | 2 - fs/gfs2/aops.c | 1 - fs/gfs2/meta_io.c | 4 +- fs/ntfs/attrib.c | 1 - fs/ntfs/file.c | 1 - include/linux/page-flags.h | 1 + include/linux/pagemap.h | 107 ++++++++++++++++++++++-- include/linux/swap.h | 1 + mm/filemap.c | 202 +++++++++++++++++---------------------------- mm/shmem.c | 6 +- mm/swap.c | 11 +++ 17 files changed, 217 insertions(+), 162 deletions(-) (limited to 'include/linux') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f29a54e454d4..4cd0ac983f91 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4510,7 +4510,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) spin_unlock(&eb->refs_lock); } -static void mark_extent_buffer_accessed(struct extent_buffer *eb) +static void mark_extent_buffer_accessed(struct extent_buffer *eb, + struct page *accessed) { unsigned long num_pages, i; @@ -4519,7 +4520,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb) num_pages = num_extent_pages(eb->start, eb->len); for (i = 0; i < num_pages; i++) { struct page *p = extent_buffer_page(eb, i); - mark_page_accessed(p); + if (p != accessed) + mark_page_accessed(p); } } @@ -4533,7 +4535,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, start >> PAGE_CACHE_SHIFT); if (eb && atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); - mark_extent_buffer_accessed(eb); + mark_extent_buffer_accessed(eb, NULL); return eb; } rcu_read_unlock(); @@ -4581,7 +4583,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, spin_unlock(&mapping->private_lock); unlock_page(p); page_cache_release(p); - mark_extent_buffer_accessed(exists); + mark_extent_buffer_accessed(exists, p); goto free_eb; } @@ -4596,7 +4598,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, attach_extent_buffer_page(eb, p); spin_unlock(&mapping->private_lock); WARN_ON(PageDirty(p)); - mark_page_accessed(p); eb->pages[i] = p; if 
(!PageUptodate(p)) uptodate = 0; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ae6af072b635..74272a3f9d9b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -470,11 +470,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) for (i = 0; i < num_pages; i++) { /* page checked is some magic around finding pages that * have been modified without going through btrfs_set_page_dirty - * clear it here + * clear it here. There should be no need to mark the pages + * accessed as prepare_pages should have marked them accessed + * in prepare_pages via find_or_create_page() */ ClearPageChecked(pages[i]); unlock_page(pages[i]); - mark_page_accessed(pages[i]); page_cache_release(pages[i]); } } diff --git a/fs/buffer.c b/fs/buffer.c index 0d3e8d5a2299..eba6e4f621ce 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) int all_mapped = 1; index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); - page = find_get_page(bd_mapping, index); + page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); if (!page) goto out; @@ -1366,12 +1366,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size) struct buffer_head *bh = lookup_bh_lru(bdev, block, size); if (bh == NULL) { + /* __find_get_block_slow will mark the page accessed */ bh = __find_get_block_slow(bdev, block); if (bh) bh_lru_install(bh); - } - if (bh) + } else touch_buffer(bh); + return bh; } EXPORT_SYMBOL(__find_get_block); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c8238a26818c..afe8a133e3d1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) * allocating. If we are looking at the buddy cache we would * have taken a reference using ext4_mb_load_buddy and that * would have pinned buddy page to page cache. + * The call to ext4_mb_get_buddy_page_lock will mark the + * page accessed. */ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { @@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) ret = -EIO; goto err; } - mark_page_accessed(page); if (e4b.bd_buddy_page == NULL) { /* @@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) ret = -EIO; goto err; } - mark_page_accessed(page); err: ext4_mb_put_buddy_page_lock(&e4b); return ret; @@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, /* we could use find_or_create_page(), but it locks page * what we'd like to avoid in fast path ... 
*/ - page = find_get_page(inode->i_mapping, pnum); + page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); if (page == NULL || !PageUptodate(page)) { if (page) /* @@ -1176,15 +1176,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, ret = -EIO; goto err; } + + /* Pages marked accessed already */ e4b->bd_bitmap_page = page; e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); - mark_page_accessed(page); block++; pnum = block / blocks_per_page; poff = block % blocks_per_page; - page = find_get_page(inode->i_mapping, pnum); + page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); if (page == NULL || !PageUptodate(page)) { if (page) page_cache_release(page); @@ -1209,9 +1210,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, ret = -EIO; goto err; } + + /* Pages marked accessed already */ e4b->bd_buddy_page = page; e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); - mark_page_accessed(page); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 4aa521aa9bc3..c405b8f17054 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -69,7 +69,6 @@ repeat: goto repeat; } out: - mark_page_accessed(page); return page; } @@ -137,13 +136,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type) if (!page) continue; if (PageUptodate(page)) { - mark_page_accessed(page); f2fs_put_page(page, 1); continue; } f2fs_submit_page_mbio(sbi, page, blk_addr, &fio); - mark_page_accessed(page); f2fs_put_page(page, 0); } out: diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index a161e955c4c8..57caa6eaf47b 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -967,7 +967,6 @@ repeat: goto repeat; } got_it: - mark_page_accessed(page); return page; } @@ -1022,7 +1021,6 @@ page_hit: f2fs_put_page(page, 1); return ERR_PTR(-EIO); } - mark_page_accessed(page); return page; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f680d2c44e97..903cbc9cd6bd 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1089,8 +1089,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); flush_dcache_page(page); - mark_page_accessed(page); - if (!tmp) { unlock_page(page); page_cache_release(page); diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 5a49b037da81..492123cda64a 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -577,7 +577,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, p = kmap_atomic(page); memcpy(buf + copied, p + offset, amt); kunmap_atomic(p); - mark_page_accessed(page); page_cache_release(page); copied += amt; index++; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 2cf09b63a6b4..b984a6e190bc 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -136,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) yield(); } } else { - page = find_lock_page(mapping, index); + page = find_get_page_flags(mapping, index, + FGP_LOCK|FGP_ACCESSED); if (!page) return NULL; } @@ -153,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) map_bh(bh, sdp->sd_vfs, blkno); unlock_page(page); - mark_page_accessed(page); page_cache_release(page); return bh; diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index a27e3fecefaf..250ed5b20c8f 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) if 
(page) { set_page_dirty(page); unlock_page(page); - mark_page_accessed(page); page_cache_release(page); } ntfs_debug("Done."); diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index db9bd8a31725..86ddab916b66 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, } do { unlock_page(pages[--do_pages]); - mark_page_accessed(pages[do_pages]); page_cache_release(pages[do_pages]); } while (do_pages); if (unlikely(status)) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4d4b39ab2341..2093eb72785e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -198,6 +198,7 @@ struct page; /* forward declaration */ TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) + __SETPAGEFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c16fb6d06e36..0a97b583ee8d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -259,12 +259,109 @@ pgoff_t page_cache_next_hole(struct address_space *mapping, pgoff_t page_cache_prev_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan); +#define FGP_ACCESSED 0x00000001 +#define FGP_LOCK 0x00000002 +#define FGP_CREAT 0x00000004 +#define FGP_WRITE 0x00000008 +#define FGP_NOFS 0x00000010 +#define FGP_NOWAIT 0x00000020 + +struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, + int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask); + +/** + * find_get_page - find and get a page reference + * @mapping: the address_space to search + * @offset: the page index + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned with an increased refcount. + * + * Otherwise, %NULL is returned. + */ +static inline struct page *find_get_page(struct address_space *mapping, + pgoff_t offset) +{ + return pagecache_get_page(mapping, offset, 0, 0, 0); +} + +static inline struct page *find_get_page_flags(struct address_space *mapping, + pgoff_t offset, int fgp_flags) +{ + return pagecache_get_page(mapping, offset, fgp_flags, 0, 0); +} + +/** + * find_lock_page - locate, pin and lock a pagecache page + * pagecache_get_page - find and get a page reference + * @mapping: the address_space to search + * @offset: the page index + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned locked and with an increased + * refcount. + * + * Otherwise, %NULL is returned. + * + * find_lock_page() may sleep. + */ +static inline struct page *find_lock_page(struct address_space *mapping, + pgoff_t offset) +{ + return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0); +} + +/** + * find_or_create_page - locate or add a pagecache page + * @mapping: the page's address_space + * @index: the page's index into the mapping + * @gfp_mask: page allocation mode + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned locked and with an increased + * refcount. + * + * If the page is not present, a new page is allocated using @gfp_mask + * and added to the page cache and the VM's LRU list. The page is + * returned locked and with an increased refcount. 
+ * + * On memory exhaustion, %NULL is returned. + * + * find_or_create_page() may sleep, even if @gfp_flags specifies an + * atomic allocation! + */ +static inline struct page *find_or_create_page(struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask) +{ + return pagecache_get_page(mapping, offset, + FGP_LOCK|FGP_ACCESSED|FGP_CREAT, + gfp_mask, gfp_mask & GFP_RECLAIM_MASK); +} + +/** + * grab_cache_page_nowait - returns locked page at given index in given cache + * @mapping: target address_space + * @index: the page index + * + * Same as grab_cache_page(), but do not wait if the page is unavailable. + * This is intended for speculative data generators, where the data can + * be regenerated if the page couldn't be grabbed. This routine should + * be safe to call while holding the lock for another page. + * + * Clear __GFP_FS when allocating the page to avoid recursion into the fs + * and deadlock against the caller's locked page. + */ +static inline struct page *grab_cache_page_nowait(struct address_space *mapping, + pgoff_t index) +{ + return pagecache_get_page(mapping, index, + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, + mapping_gfp_mask(mapping), + GFP_NOFS); +} + struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); -struct page *find_get_page(struct address_space *mapping, pgoff_t offset); struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); -struct page *find_lock_page(struct address_space *mapping, pgoff_t offset); -struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, - gfp_t gfp_mask); unsigned find_get_entries(struct address_space *mapping, pgoff_t start, unsigned int nr_entries, struct page **entries, pgoff_t *indices); @@ -287,8 +384,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping, return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } -extern struct page * grab_cache_page_nowait(struct address_space *mapping, - pgoff_t index); extern struct page * read_cache_page(struct address_space *mapping, pgoff_t index, filler_t *filler, void *data); extern struct page * read_cache_page_gfp(struct address_space *mapping, diff --git a/include/linux/swap.h b/include/linux/swap.h index 97cf16164c46..4348d95e571f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -311,6 +311,7 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail, struct lruvec *lruvec, struct list_head *head); extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); +extern void init_page_accessed(struct page *page); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); diff --git a/mm/filemap.c b/mm/filemap.c index 47d235b357a7..0fcd792103f3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -981,26 +981,6 @@ out: } EXPORT_SYMBOL(find_get_entry); -/** - * find_get_page - find and get a page reference - * @mapping: the address_space to search - * @offset: the page index - * - * Looks up the page cache slot at @mapping & @offset. If there is a - * page cache page, it is returned with an increased refcount. - * - * Otherwise, %NULL is returned. 
- */ -struct page *find_get_page(struct address_space *mapping, pgoff_t offset) -{ - struct page *page = find_get_entry(mapping, offset); - - if (radix_tree_exceptional_entry(page)) - page = NULL; - return page; -} -EXPORT_SYMBOL(find_get_page); - /** * find_lock_entry - locate, pin and lock a page cache entry * @mapping: the address_space to search @@ -1038,66 +1018,84 @@ repeat: EXPORT_SYMBOL(find_lock_entry); /** - * find_lock_page - locate, pin and lock a pagecache page + * pagecache_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index + * @fgp_flags: PCG flags + * @gfp_mask: gfp mask to use if a page is to be allocated * - * Looks up the page cache slot at @mapping & @offset. If there is a - * page cache page, it is returned locked and with an increased - * refcount. - * - * Otherwise, %NULL is returned. - * - * find_lock_page() may sleep. - */ -struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) -{ - struct page *page = find_lock_entry(mapping, offset); - - if (radix_tree_exceptional_entry(page)) - page = NULL; - return page; -} -EXPORT_SYMBOL(find_lock_page); - -/** - * find_or_create_page - locate or add a pagecache page - * @mapping: the page's address_space - * @index: the page's index into the mapping - * @gfp_mask: page allocation mode + * Looks up the page cache slot at @mapping & @offset. * - * Looks up the page cache slot at @mapping & @offset. If there is a - * page cache page, it is returned locked and with an increased - * refcount. + * PCG flags modify how the page is returned * - * If the page is not present, a new page is allocated using @gfp_mask - * and added to the page cache and the VM's LRU list. The page is - * returned locked and with an increased refcount. + * FGP_ACCESSED: the page will be marked accessed + * FGP_LOCK: Page is return locked + * FGP_CREAT: If page is not present then a new page is allocated using + * @gfp_mask and added to the page cache and the VM's LRU + * list. The page is returned locked and with an increased + * refcount. Otherwise, %NULL is returned. * - * On memory exhaustion, %NULL is returned. + * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even + * if the GFP flags specified for FGP_CREAT are atomic. * - * find_or_create_page() may sleep, even if @gfp_flags specifies an - * atomic allocation! + * If there is a page cache page, it is returned with an increased refcount. */ -struct page *find_or_create_page(struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask) +struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, + int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) { struct page *page; - int err; + repeat: - page = find_lock_page(mapping, index); - if (!page) { - page = __page_cache_alloc(gfp_mask); + page = find_get_entry(mapping, offset); + if (radix_tree_exceptional_entry(page)) + page = NULL; + if (!page) + goto no_page; + + if (fgp_flags & FGP_LOCK) { + if (fgp_flags & FGP_NOWAIT) { + if (!trylock_page(page)) { + page_cache_release(page); + return NULL; + } + } else { + lock_page(page); + } + + /* Has the page been truncated? 
*/ + if (unlikely(page->mapping != mapping)) { + unlock_page(page); + page_cache_release(page); + goto repeat; + } + VM_BUG_ON_PAGE(page->index != offset, page); + } + + if (page && (fgp_flags & FGP_ACCESSED)) + mark_page_accessed(page); + +no_page: + if (!page && (fgp_flags & FGP_CREAT)) { + int err; + if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) + cache_gfp_mask |= __GFP_WRITE; + if (fgp_flags & FGP_NOFS) { + cache_gfp_mask &= ~__GFP_FS; + radix_gfp_mask &= ~__GFP_FS; + } + + page = __page_cache_alloc(cache_gfp_mask); if (!page) return NULL; - /* - * We want a regular kernel memory (not highmem or DMA etc) - * allocation for the radix tree nodes, but we need to honour - * the context-specific requirements the caller has asked for. - * GFP_RECLAIM_MASK collects those requirements. - */ - err = add_to_page_cache_lru(page, mapping, index, - (gfp_mask & GFP_RECLAIM_MASK)); + + if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) + fgp_flags |= FGP_LOCK; + + /* Init accessed so avoit atomic mark_page_accessed later */ + if (fgp_flags & FGP_ACCESSED) + init_page_accessed(page); + + err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); if (unlikely(err)) { page_cache_release(page); page = NULL; @@ -1105,9 +1103,10 @@ repeat: goto repeat; } } + return page; } -EXPORT_SYMBOL(find_or_create_page); +EXPORT_SYMBOL(pagecache_get_page); /** * find_get_entries - gang pagecache lookup @@ -1404,39 +1403,6 @@ repeat: } EXPORT_SYMBOL(find_get_pages_tag); -/** - * grab_cache_page_nowait - returns locked page at given index in given cache - * @mapping: target address_space - * @index: the page index - * - * Same as grab_cache_page(), but do not wait if the page is unavailable. - * This is intended for speculative data generators, where the data can - * be regenerated if the page couldn't be grabbed. This routine should - * be safe to call while holding the lock for another page. - * - * Clear __GFP_FS when allocating the page to avoid recursion into the fs - * and deadlock against the caller's locked page. - */ -struct page * -grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) -{ - struct page *page = find_get_page(mapping, index); - - if (page) { - if (trylock_page(page)) - return page; - page_cache_release(page); - return NULL; - } - page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); - if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { - page_cache_release(page); - page = NULL; - } - return page; -} -EXPORT_SYMBOL(grab_cache_page_nowait); - /* * CD/DVDs are error prone. When a medium error occurs, the driver may fail * a _large_ part of the i/o request. 
Imagine the worst scenario: @@ -2406,7 +2372,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping, { const struct address_space_operations *aops = mapping->a_ops; - mark_page_accessed(page); return aops->write_end(file, mapping, pos, len, copied, page, fsdata); } EXPORT_SYMBOL(pagecache_write_end); @@ -2488,34 +2453,18 @@ EXPORT_SYMBOL(generic_file_direct_write); struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags) { - int status; - gfp_t gfp_mask; struct page *page; - gfp_t gfp_notmask = 0; + int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; - gfp_mask = mapping_gfp_mask(mapping); - if (mapping_cap_account_dirty(mapping)) - gfp_mask |= __GFP_WRITE; if (flags & AOP_FLAG_NOFS) - gfp_notmask = __GFP_FS; -repeat: - page = find_lock_page(mapping, index); + fgp_flags |= FGP_NOFS; + + page = pagecache_get_page(mapping, index, fgp_flags, + mapping_gfp_mask(mapping), + GFP_KERNEL); if (page) - goto found; + wait_for_stable_page(page); - page = __page_cache_alloc(gfp_mask & ~gfp_notmask); - if (!page) - return NULL; - status = add_to_page_cache_lru(page, mapping, index, - GFP_KERNEL & ~gfp_notmask); - if (unlikely(status)) { - page_cache_release(page); - if (status == -EEXIST) - goto repeat; - return NULL; - } -found: - wait_for_stable_page(page); return page; } EXPORT_SYMBOL(grab_cache_page_write_begin); @@ -2564,7 +2513,7 @@ again: status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); - if (unlikely(status)) + if (unlikely(status < 0)) break; if (mapping_writably_mapped(mapping)) @@ -2573,7 +2522,6 @@ again: copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); flush_dcache_page(page); - mark_page_accessed(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status < 0)) diff --git a/mm/shmem.c b/mm/shmem.c index f47fb38c4889..5402481c28d1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1372,9 +1372,13 @@ shmem_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { + int ret; struct inode *inode = mapping->host; pgoff_t index = pos >> PAGE_CACHE_SHIFT; - return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); + ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); + if (ret == 0 && *pagep) + init_page_accessed(*pagep); + return ret; } static int diff --git a/mm/swap.c b/mm/swap.c index 1fb25f8bb155..9e8e3472248b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -614,6 +614,17 @@ void mark_page_accessed(struct page *page) } EXPORT_SYMBOL(mark_page_accessed); +/* + * Used to mark_page_accessed(page) that is not visible yet and when it is + * still safe to use non-atomic ops + */ +void init_page_accessed(struct page *page) +{ + if (!PageReferenced(page)) + __SetPageReferenced(page); +} +EXPORT_SYMBOL(init_page_accessed); + static void __lru_cache_add(struct page *page) { struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -- cgit v1.2.3 From b7596fb43aa786fb3ee5015a73034fbb9e80feaa Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 4 Jun 2014 16:10:37 -0700 Subject: include/linux/gfp.h: exclude duplicate header mmdebug.h is included twice. 
Signed-off-by: Andy Shevchenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 454c99fdb79d..6eb1fb37de9a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -6,7 +6,6 @@ #include #include #include -#include struct vm_area_struct; -- cgit v1.2.3 From 4be89a34609659042ef0bf883ad76388fb5251bb Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Wed, 4 Jun 2014 16:10:38 -0700 Subject: mm/vmscan.c: use DIV_ROUND_UP for calculation of zone's balance_gap and correct comments. Currently, we use (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / KSWAPD_ZONE_BALANCE_GAP_RATIO to avoid a zero gap value. It's better to use DIV_ROUND_UP macro for neater code and clear meaning. Besides, the gap value is calculated against the per-zone "managed pages", not "present pages". This patch also corrects the comment and do some rephrasing. Signed-off-by: Jianyu Zhan Acked-by: Rik van Riel Acked-by: Rafael Aquini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 8 ++++---- mm/vmscan.c | 10 ++++------ 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 4348d95e571f..4bdbee80eede 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -166,10 +166,10 @@ enum { #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX /* - * Ratio between the present memory in the zone and the "gap" that - * we're allowing kswapd to shrink in addition to the per-zone high - * wmark, even for zones that already have the high wmark satisfied, - * in order to provide better per-zone lru behavior. We are ok to + * Ratio between zone->managed_pages and the "gap" that above the per-zone + * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that + * do not meet the (high_wmark + gap) watermark, even which already met the + * high_wmark, in order to provide better per-zone lru behavior. We are ok to * spend not more than 1% of the memory for this zone balancing "gap". */ #define KSWAPD_ZONE_BALANCE_GAP_RATIO 100 diff --git a/mm/vmscan.c b/mm/vmscan.c index 494cd632178c..cc29fca8d989 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2295,9 +2295,8 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) * there is a buffer of free pages available to give compaction * a reasonable chance of completing and allocating the page */ - balance_gap = min(low_wmark_pages(zone), - (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / - KSWAPD_ZONE_BALANCE_GAP_RATIO); + balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( + zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); @@ -2949,9 +2948,8 @@ static bool kswapd_shrink_zone(struct zone *zone, * high wmark plus a "gap" where the gap is either the low * watermark or 1% of the zone, whichever is smaller. 
*/ - balance_gap = min(low_wmark_pages(zone), - (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / - KSWAPD_ZONE_BALANCE_GAP_RATIO); + balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( + zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); /* * If there is no low memory pressure or the zone is balanced then no -- cgit v1.2.3 From daa5ba768b9e15da8867824d2f1e8d455f1acac2 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 4 Jun 2014 16:10:52 -0700 Subject: mm/rmap.c: cleanup ttu_flags Transform action part of ttu_flags into individiual bits. These flags aren't part of any uses-space visible api or even trace events. Signed-off-by: Konstantin Khlebnikov Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 7 +++---- mm/rmap.c | 10 +++++----- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 9be55c7617da..be574506e6a9 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -72,10 +72,9 @@ struct anon_vma_chain { }; enum ttu_flags { - TTU_UNMAP = 0, /* unmap mode */ - TTU_MIGRATION = 1, /* migration mode */ - TTU_MUNLOCK = 2, /* munlock mode */ - TTU_ACTION_MASK = 0xff, + TTU_UNMAP = 1, /* unmap mode */ + TTU_MIGRATION = 2, /* migration mode */ + TTU_MUNLOCK = 4, /* munlock mode */ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ diff --git a/mm/rmap.c b/mm/rmap.c index ab74290d185d..ea8e20d75b29 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1162,7 +1162,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (vma->vm_flags & VM_LOCKED) goto out_mlock; - if (TTU_ACTION(flags) == TTU_MUNLOCK) + if (flags & TTU_MUNLOCK) goto out_unmap; } if (!(flags & TTU_IGNORE_ACCESS)) { @@ -1230,7 +1230,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ - BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); + BUG_ON(!(flags & TTU_MIGRATION)); entry = make_migration_entry(page, pte_write(pteval)); } swp_pte = swp_entry_to_pte(entry); @@ -1239,7 +1239,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, set_pte_at(mm, address, pte, swp_pte); BUG_ON(pte_file(*pte)); } else if (IS_ENABLED(CONFIG_MIGRATION) && - (TTU_ACTION(flags) == TTU_MIGRATION)) { + (flags & TTU_MIGRATION)) { /* Establish migration entry for a file page */ swp_entry_t entry; entry = make_migration_entry(page, pte_write(pteval)); @@ -1252,7 +1252,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, out_unmap: pte_unmap_unlock(pte, ptl); - if (ret != SWAP_FAIL && TTU_ACTION(flags) != TTU_MUNLOCK) + if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK)) mmu_notifier_invalidate_page(mm, address); out: return ret; @@ -1539,7 +1539,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. 
*/ - if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page)) + if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page)) rwc.invalid_vma = invalid_migration_vma; ret = rmap_walk(page, &rwc); -- cgit v1.2.3 From 100873d7a777b67ad35197c5a998b5e778f8bf3f Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 4 Jun 2014 16:10:56 -0700 Subject: hugetlb: rename hugepage_migration_support() to ..._supported() We already have a function named hugepages_supported(), and the similar name hugepage_migration_support() is a bit unconfortable, so let's rename it hugepage_migration_supported(). Signed-off-by: Naoya Horiguchi Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 2 +- mm/migrate.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 35786ee36f06..255cd5cc0754 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -397,7 +397,7 @@ static inline pgoff_t basepage_index(struct page *page) extern void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); -static inline int hugepage_migration_support(struct hstate *h) +static inline int hugepage_migration_supported(struct hstate *h) { #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION return huge_page_shift(h) == PMD_SHIFT; @@ -453,7 +453,7 @@ static inline pgoff_t basepage_index(struct page *page) return page->index; } #define dissolve_free_huge_pages(s, e) do {} while (0) -#define hugepage_migration_support(h) 0 +#define hugepage_migration_supported(h) 0 static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 244194217e39..226910cb7c9b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -544,7 +544,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid) /* Movability of hugepages depends on migration support. */ static inline gfp_t htlb_alloc_mask(struct hstate *h) { - if (hugepages_treat_as_movable || hugepage_migration_support(h)) + if (hugepages_treat_as_movable || hugepage_migration_supported(h)) return GFP_HIGHUSER_MOVABLE; else return GFP_HIGHUSER; diff --git a/mm/migrate.c b/mm/migrate.c index 2a459675eeab..63f0cd559999 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1039,7 +1039,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, * tables or check whether the hugepage is pmd-based or not before * kicking migration. */ - if (!hugepage_migration_support(page_hstate(hpage))) { + if (!hugepage_migration_supported(page_hstate(hpage))) { putback_active_hugepage(hpage); return -ENOSYS; } -- cgit v1.2.3 From 50417c55562c03e6746b13aee650c2bbb048fea3 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Wed, 4 Jun 2014 16:11:07 -0700 Subject: mm/zbud.c: make size unsigned like unique callsite zbud_alloc is only called by zswap_frontswap_store with unsigned int len. Change function parameter + update >= 0 check. 
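As an editor-added illustration (plain user-space C, not kernel code; the DEMO_* constants and check_alloc() helper are made up), the snippet below shows why the guard changes once the size parameter is unsigned: zero is rejected explicitly with "!size", and a negative value from a confused caller wraps to a huge unsigned number that the existing upper-bound test rejects anyway.

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096u
	#define DEMO_OVERHEAD    64u	/* stand-in for ZHDR_SIZE_ALIGNED + CHUNK_SIZE */

	static int check_alloc(unsigned int size)
	{
		if (!size)				/* was "size <= 0" when size was int */
			return -1;			/* -EINVAL in the kernel */
		if (size > DEMO_PAGE_SIZE - DEMO_OVERHEAD)
			return -2;			/* -ENOSPC in the kernel */
		return 0;
	}

	int main(void)
	{
		printf("size 0        -> %d\n", check_alloc(0));
		printf("size 100      -> %d\n", check_alloc(100));
		printf("size (int)-1  -> %d\n", check_alloc((unsigned int)-1));
		return 0;
	}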
Signed-off-by: Fabian Frederick Acked-by: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/zbud.h | 2 +- mm/zbud.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/zbud.h b/include/linux/zbud.h index 2571a5cfa5fc..13af0d450bf6 100644 --- a/include/linux/zbud.h +++ b/include/linux/zbud.h @@ -11,7 +11,7 @@ struct zbud_ops { struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); void zbud_destroy_pool(struct zbud_pool *pool); -int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, +int zbud_alloc(struct zbud_pool *pool, unsigned int size, gfp_t gfp, unsigned long *handle); void zbud_free(struct zbud_pool *pool, unsigned long handle); int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries); diff --git a/mm/zbud.c b/mm/zbud.c index 9451361e6aa7..01df13a7e2e1 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -247,7 +247,7 @@ void zbud_destroy_pool(struct zbud_pool *pool) * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate * a new page. */ -int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, +int zbud_alloc(struct zbud_pool *pool, unsigned int size, gfp_t gfp, unsigned long *handle) { int chunks, i, freechunks; @@ -255,7 +255,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, enum buddy bud; struct page *page; - if (size <= 0 || gfp & __GFP_HIGHMEM) + if (!size || (gfp & __GFP_HIGHMEM)) return -EINVAL; if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) return -ENOSPC; -- cgit v1.2.3 From 2c0d259e0e580dd95dd5d2d5aa4926169228d4a0 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 4 Jun 2014 16:11:16 -0700 Subject: compiler.h: avoid sparse errors in __compiletime_error_fallback() Usually, BUG_ON and friends aren't even evaluated in sparse, but recently compiletime_assert_atomic_type() was added, and that now results in a sparse warning every time it is used. The reason turns out to be the temporary variable, after it sparse no longer considers the value to be a constant, and results in a warning and an error. The error is the more annoying part of this as it suppresses any further warnings in the same file, hiding other problems. Unfortunately the condition cannot be simply expanded out to avoid the temporary variable since it breaks compiletime_assert on old versions of GCC such as GCC 4.2.4 which the latest metag compiler is based on. Therefore #ifndef __CHECKER__ out the __compiletime_error_fallback which uses the potentially negative size array to trigger a conditional compiler error, so that sparse doesn't see it. Signed-off-by: James Hogan Cc: Johannes Berg Cc: Daniel Santos Cc: Luciano Coelho Cc: Peter Zijlstra Cc: Paul E. McKenney Acked-by: Johannes Berg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ee7239ea1583..64fdfe1cfcf0 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -323,9 +323,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #endif #ifndef __compiletime_error # define __compiletime_error(message) -# define __compiletime_error_fallback(condition) \ +/* + * Sparse complains of variable sized arrays due to the temporary variable in + * __compiletime_assert. 
Unfortunately we can't just expand it out to make + * sparse see a constant array size without breaking compiletime_assert on old + * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. + */ +# ifndef __CHECKER__ +# define __compiletime_error_fallback(condition) \ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -#else +# endif +#endif +#ifndef __compiletime_error_fallback # define __compiletime_error_fallback(condition) do { } while (0) #endif -- cgit v1.2.3 From b300a4ea665f7fa44f015616ac1874deca891c5e Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 4 Jun 2014 16:11:27 -0700 Subject: kernel/user.c: drop unused field 'files' from user_struct Nobody seems uses it for a long time. Let's drop it. Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 - kernel/user.c | 1 - 2 files changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 2f2dd7d932a2..611676fd4c2c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -745,7 +745,6 @@ static inline int signal_group_exit(const struct signal_struct *sig) struct user_struct { atomic_t __count; /* reference count */ atomic_t processes; /* How many processes does this user have? */ - atomic_t files; /* How many open files does this user have? */ atomic_t sigpending; /* How many pending signals does this user have? */ #ifdef CONFIG_INOTIFY_USER atomic_t inotify_watches; /* How many inotify watches does this user have? */ diff --git a/kernel/user.c b/kernel/user.c index 294fc6a94168..4efa39350e44 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -87,7 +87,6 @@ static DEFINE_SPINLOCK(uidhash_lock); struct user_struct root_user = { .__count = ATOMIC_INIT(1), .processes = ATOMIC_INIT(1), - .files = ATOMIC_INIT(0), .sigpending = ATOMIC_INIT(0), .locked_shm = 0, .uid = GLOBAL_ROOT_UID, -- cgit v1.2.3 From aac74dc495456412c4130a1167ce4beb6c1f0b38 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 4 Jun 2014 16:11:40 -0700 Subject: printk: rename printk_sched to printk_deferred After learning we'll need some sort of deferred printk functionality in the timekeeping core, Peter suggested we rename the printk_sched function so it can be reused by needed subsystems. This only changes the function name. No logic changes. Signed-off-by: John Stultz Reviewed-by: Steven Rostedt Cc: Jan Kara Cc: Peter Zijlstra Cc: Jiri Bohac Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/printk.h | 6 +++--- kernel/printk/printk.c | 2 +- kernel/sched/core.c | 2 +- kernel/sched/deadline.c | 2 +- kernel/sched/rt.c | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 8752f7595b27..7847301e2837 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -128,9 +128,9 @@ asmlinkage __printf(1, 2) __cold int printk(const char *fmt, ...); /* - * Special printk facility for scheduler use only, _DO_NOT_USE_ ! + * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ -__printf(1, 2) __cold int printk_sched(const char *fmt, ...); +__printf(1, 2) __cold int printk_deferred(const char *fmt, ...); /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -165,7 +165,7 @@ int printk(const char *s, ...) return 0; } static inline __printf(1, 2) __cold -int printk_sched(const char *s, ...) 
+int printk_deferred(const char *s, ...) { return 0; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index dc2b8bd9bc1e..35d9db251903 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2585,7 +2585,7 @@ void wake_up_klogd(void) preempt_enable(); } -int printk_sched(const char *fmt, ...) +int printk_deferred(const char *fmt, ...) { va_list args; int r; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 913c6d6cc2c1..caf03e89a068 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1367,7 +1367,7 @@ out: * leave kernel. */ if (p->mm && printk_ratelimit()) { - printk_sched("process %d (%s) no longer affine to cpu%d\n", + printk_deferred("process %d (%s) no longer affine to cpu%d\n", task_pid_nr(p), p->comm, cpu); } } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f9ca7d19781a..d17e1c48a79d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -352,7 +352,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, if (!lag_once) { lag_once = true; - printk_sched("sched: DL replenish lagged to much\n"); + printk_deferred("sched: DL replenish lagged to much\n"); } dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->runtime = pi_se->dl_runtime; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 0ebfd7a29472..5d7667b37c21 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -896,7 +896,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) if (!once) { once = true; - printk_sched("sched: RT throttling activated\n"); + printk_deferred("sched: RT throttling activated\n"); } } else { /* -- cgit v1.2.3 From c224815dac9c739b79050d3cc67443ff500bc478 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 4 Jun 2014 16:11:41 -0700 Subject: printk: Add printk_deferred_once Two of the three prink_deferred uses are really printk_once style uses, so add a printk_deferred_once macro to simplify those call sites. Signed-off-by: John Stultz Reviewed-by: Steven Rostedt Reviewed-by: Jan Kara Cc: Peter Zijlstra Cc: Jiri Bohac Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/printk.h | 11 +++++++++++ kernel/sched/deadline.c | 7 +------ kernel/sched/rt.c | 8 +------- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 7847301e2837..f086d6c99dbc 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -266,9 +266,20 @@ extern asmlinkage void dump_stack(void) __cold; printk(fmt, ##__VA_ARGS__); \ } \ }) +#define printk_deferred_once(fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk_deferred(fmt, ##__VA_ARGS__); \ + } \ +}) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) +#define printk_deferred_once(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) #endif #define pr_emerg_once(fmt, ...) \ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d17e1c48a79d..e1574fca03b5 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -348,12 +348,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, * entity. 
*/ if (dl_time_before(dl_se->deadline, rq_clock(rq))) { - static bool lag_once = false; - - if (!lag_once) { - lag_once = true; - printk_deferred("sched: DL replenish lagged to much\n"); - } + printk_deferred_once("sched: DL replenish lagged to much\n"); dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->runtime = pi_se->dl_runtime; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 5d7667b37c21..b3512f1afce9 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -890,14 +890,8 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) * but accrue some time due to boosting. */ if (likely(rt_b->rt_runtime)) { - static bool once = false; - rt_rq->rt_throttled = 1; - - if (!once) { - once = true; - printk_deferred("sched: RT throttling activated\n"); - } + printk_deferred_once("sched: RT throttling activated\n"); } else { /* * In case we did anyway, make it go away, -- cgit v1.2.3 From 6e099f557d9c6797c3ee3ee7b5c8cebe543ec1cc Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Wed, 4 Jun 2014 16:11:44 -0700 Subject: Documentation: expand/clarify debug documentation The pr_debug() and related debug print macros all differ from the normal pr_XXX() macros, in that the normal ones print unconditionally, while the debug macros are compiled out unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is set. This isn't obvious, and the only way to find this out is either to review the actual printk.h code or to read CodingStyle, and the message there doesn't highlight the fact. Change Documentation/CodingStyle to clearly indicate that pr_debug() and related debug printing macros behave differently than all other pr_XXX() macros, and attempt to clarify when and where the different debug printing methods might be used. Add short comment to printk.h above the pr_XXX() macros indicating that while these macros print unconditionally, pr_debug() does not. Signed-off-by: Dan Streetman Cc: Joe Perches Cc: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/CodingStyle | 22 +++++++++++++++------- include/linux/printk.h | 6 ++++++ 2 files changed, 21 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle index 7fe0546c504a..6b6bef31e956 100644 --- a/Documentation/CodingStyle +++ b/Documentation/CodingStyle @@ -660,15 +660,23 @@ There are a number of driver model diagnostic macros in which you should use to make sure messages are matched to the right device and driver, and are tagged with the right level: dev_err(), dev_warn(), dev_info(), and so forth. For messages that aren't associated with a -particular device, defines pr_debug() and pr_info(). +particular device, defines pr_notice(), pr_info(), +pr_warn(), pr_err(), etc. Coming up with good debugging messages can be quite a challenge; and once -you have them, they can be a huge help for remote troubleshooting. Such -messages should be compiled out when the DEBUG symbol is not defined (that -is, by default they are not included). When you use dev_dbg() or pr_debug(), -that's automatic. Many subsystems have Kconfig options to turn on -DDEBUG. -A related convention uses VERBOSE_DEBUG to add dev_vdbg() messages to the -ones already enabled by DEBUG. +you have them, they can be a huge help for remote troubleshooting. However +debug message printing is handled differently than printing other non-debug +messages. 
While the other pr_XXX() functions print unconditionally, +pr_debug() does not; it is compiled out by default, unless either DEBUG is +defined or CONFIG_DYNAMIC_DEBUG is set. That is true for dev_dbg() also, +and a related convention uses VERBOSE_DEBUG to add dev_vdbg() messages to +the ones already enabled by DEBUG. + +Many subsystems have Kconfig debug options to turn on -DDEBUG in the +corresponding Makefile; in other cases specific files #define DEBUG. And +when a debug message should be unconditionally printed, such as if it is +already inside a debug-related #ifdef secton, printk(KERN_DEBUG ...) can be +used. Chapter 14: Allocating memory diff --git a/include/linux/printk.h b/include/linux/printk.h index f086d6c99dbc..37f3a6589c1c 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -210,6 +210,12 @@ extern asmlinkage void dump_stack(void) __cold; #define pr_fmt(fmt) fmt #endif +/* + * These can be used to print at the various log levels. + * All of these will print unconditionally, although note that pr_debug() + * and other debug macros are compiled out unless either DEBUG is defined + * or CONFIG_DYNAMIC_DEBUG is set. + */ #define pr_emerg(fmt, ...) \ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert(fmt, ...) \ -- cgit v1.2.3 From a8fe19ebfbfd90ec17c02284717238b02efb9580 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 4 Jun 2014 16:11:46 -0700 Subject: kernel/printk: use symbolic defines for console loglevels ... instead of naked numbers. Stuff in sysrq.c used to set it to 8 which is supposed to mean above default level so set it to DEBUG instead as we're terminating/killing all tasks and we want to be verbose there. Also, correct the check in x86_64_start_kernel which should be >= as we're clearly issuing the string there for all debug levels, not only the magical 10. Signed-off-by: Borislav Petkov Acked-by: Kees Cook Acked-by: Randy Dunlap Cc: Joe Perches Cc: Valdis Kletnieks Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/head64.c | 2 +- arch/x86/platform/uv/uv_nmi.c | 2 +- drivers/nubus/nubus.c | 18 +++++++++--------- drivers/tty/sysrq.c | 8 ++++---- include/linux/printk.h | 15 +++++++++++++-- init/main.c | 4 ++-- kernel/debug/kdb/kdb_bt.c | 2 +- kernel/debug/kdb/kdb_io.c | 2 +- kernel/debug/kdb/kdb_main.c | 2 +- kernel/printk/printk.c | 13 +++---------- 10 files changed, 36 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 068054f4bf20..eda1a865641e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -172,7 +172,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) */ load_ucode_bsp(); - if (console_loglevel == 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) early_printk("Kernel alive\n"); clear_page(init_level4_pgt); diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index be27da60dc8f..c89c93320c12 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -85,7 +85,7 @@ static cpumask_var_t uv_nmi_cpu_mask; * Default is all stack dumps go to the console and buffer. * Lower level to send to log buffer only. 
*/ -static int uv_nmi_loglevel = 7; +static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644); /* diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c index 43926cd25ae8..5066a7ef7b6c 100644 --- a/drivers/nubus/nubus.c +++ b/drivers/nubus/nubus.c @@ -473,7 +473,7 @@ static struct nubus_dev* __init if (slot == 0 && (unsigned long)dir.base % 2) dir.base += 1; - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_functional_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); @@ -568,7 +568,7 @@ static int __init nubus_get_vidnames(struct nubus_board* board, printk(KERN_INFO " video modes supported:\n"); nubus_get_subdir(parent, &dir); - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_vidnames: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); @@ -629,7 +629,7 @@ static int __init nubus_get_vendorinfo(struct nubus_board* board, printk(KERN_INFO " vendor info:\n"); nubus_get_subdir(parent, &dir); - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_vendorinfo: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); @@ -654,7 +654,7 @@ static int __init nubus_get_board_resource(struct nubus_board* board, int slot, struct nubus_dirent ent; nubus_get_subdir(parent, &dir); - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_board_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); @@ -753,19 +753,19 @@ static void __init nubus_find_rom_dir(struct nubus_board* board) if (nubus_readdir(&dir, &ent) == -1) goto badrom; - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_INFO "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* This one takes us to where we want to go. */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); nubus_get_subdir(&ent, &dir); /* Resource ID 01, also an "Unknown Macintosh" */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* FIXME: the first one is *not* always the right one. We @@ -780,7 +780,7 @@ static void __init nubus_find_rom_dir(struct nubus_board* board) path to that address... */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; - if (console_loglevel >= 10) + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* Bwahahahaha... 
*/ @@ -816,7 +816,7 @@ static struct nubus_board* __init nubus_add_board(int slot, int bytelanes) board->fblock = rp; /* Dump the format block for debugging purposes */ - if (console_loglevel >= 10) { + if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) { int i; printk(KERN_DEBUG "Slot %X, format block at 0x%p\n", slot, rp); diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index ce396ecdf412..b767a64e49d9 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -88,7 +88,7 @@ static void sysrq_handle_loglevel(int key) int i; i = key - '0'; - console_loglevel = 7; + console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; printk("Loglevel set to %d\n", i); console_loglevel = i; } @@ -343,7 +343,7 @@ static void send_sig_all(int sig) static void sysrq_handle_term(int key) { send_sig_all(SIGTERM); - console_loglevel = 8; + console_loglevel = CONSOLE_LOGLEVEL_DEBUG; } static struct sysrq_key_op sysrq_term_op = { .handler = sysrq_handle_term, @@ -387,7 +387,7 @@ static struct sysrq_key_op sysrq_thaw_op = { static void sysrq_handle_kill(int key) { send_sig_all(SIGKILL); - console_loglevel = 8; + console_loglevel = CONSOLE_LOGLEVEL_DEBUG; } static struct sysrq_key_op sysrq_kill_op = { .handler = sysrq_handle_kill, @@ -520,7 +520,7 @@ void __handle_sysrq(int key, bool check_mask) * routing in the consumers of /proc/kmsg. */ orig_log_level = console_loglevel; - console_loglevel = 7; + console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; printk(KERN_INFO "SysRq : "); op_p = __sysrq_get_key_op(key); diff --git a/include/linux/printk.h b/include/linux/printk.h index 37f3a6589c1c..319ff7e53efb 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -30,6 +30,17 @@ static inline const char *printk_skip_level(const char *buffer) return buffer; } +/* printk's without a loglevel use this.. */ +#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL + +/* We show everything that is MORE important than this.. 
*/ +#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ +#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ +#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ +#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ +#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ +#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ + extern int console_printk[]; #define console_loglevel (console_printk[0]) @@ -39,13 +50,13 @@ extern int console_printk[]; static inline void console_silent(void) { - console_loglevel = 0; + console_loglevel = CONSOLE_LOGLEVEL_SILENT; } static inline void console_verbose(void) { if (console_loglevel) - console_loglevel = 15; + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; } struct va_format { diff --git a/init/main.c b/init/main.c index e08c0b2065a1..04fab8d74c89 100644 --- a/init/main.c +++ b/init/main.c @@ -203,13 +203,13 @@ EXPORT_SYMBOL(loops_per_jiffy); static int __init debug_kernel(char *str) { - console_loglevel = 10; + console_loglevel = CONSOLE_LOGLEVEL_DEBUG; return 0; } static int __init quiet_kernel(char *str) { - console_loglevel = 4; + console_loglevel = CONSOLE_LOGLEVEL_QUIET; return 0; } diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index b03e0e814e43..fe15fff5df53 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -21,7 +21,7 @@ static void kdb_show_stack(struct task_struct *p, void *addr) { int old_lvl = console_loglevel; - console_loglevel = 15; + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; kdb_trap_printk++; kdb_set_current_task(p); if (addr) { diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 14ff4849262c..7c70812caea5 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -710,7 +710,7 @@ kdb_printit: } if (logging) { saved_loglevel = console_loglevel; - console_loglevel = 0; + console_loglevel = CONSOLE_LOGLEVEL_SILENT; printk(KERN_INFO "%s", kdb_buffer); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 0b097c8a1e50..2f7c760305ca 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1091,7 +1091,7 @@ static int kdb_reboot(int argc, const char **argv) static void kdb_dumpregs(struct pt_regs *regs) { int old_lvl = console_loglevel; - console_loglevel = 15; + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; kdb_trap_printk++; show_regs(regs); kdb_trap_printk--; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 923c5d4e4202..ea2d5f6962ed 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -54,18 +54,11 @@ #include "console_cmdline.h" #include "braille.h" -/* printk's without a loglevel use this.. */ -#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL - -/* We show everything that is MORE important than this.. 
*/ -#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ -#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ - int console_printk[4] = { - DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ + CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ - MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ - DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ + CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */ + CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */ }; /* Deferred messaged from sched code are marked by this special level */ -- cgit v1.2.3 From 34a1b7236ad6113883f6c448d1da854cad60265e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 4 Jun 2014 16:12:19 -0700 Subject: kthreads: kill CLONE_KERNEL, change kernel_thread(kernel_init) to avoid CLONE_SIGHAND 1. Remove CLONE_KERNEL, it has no users and it is dangerous. The (old) comment says "List of flags we want to share for kernel threads" but this is not true, we do not want to share ->sighand by default. This flag can only be used if the caller is sure that both parent/child will never play with signals (say, allow_signal/etc). 2. Change rest_init() to clone kernel_init() without CLONE_SIGHAND. In this case CLONE_SIGHAND does not really hurt, and it looks like optimization because copy_sighand() can avoid kmem_cache_alloc(). But in fact this only adds the minor pessimization. kernel_init() is going to exec the init process, and de_thread() will need to unshare ->sighand and do kmem_cache_alloc(sighand_cachep) anyway, but it needs to do more work and take tasklist_lock and siglock. Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Acked-by: Steven Rostedt Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mathieu Desnoyers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 6 ------ init/main.c | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 611676fd4c2c..8fcd0e6098d9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -136,12 +136,6 @@ struct filename; #define VMACACHE_SIZE (1U << VMACACHE_BITS) #define VMACACHE_MASK (VMACACHE_SIZE - 1) -/* - * List of flags we want to share for kernel threads, - * if only because they are not used by them anyway. - */ -#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) - /* * These are the constant used to fake the fixed-point load-average * counting. Some notes: diff --git a/init/main.c b/init/main.c index 8ac3833f2bdf..4de815c0309a 100644 --- a/init/main.c +++ b/init/main.c @@ -380,7 +380,7 @@ static noinline void __init_refok rest_init(void) * the init task will end up wanting to create kthreads, which, if * we schedule it before we create kthreadd, will OOPS. 
*/ - kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); + kernel_thread(kernel_init, NULL, CLONE_FS); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); rcu_read_lock(); -- cgit v1.2.3 From 647f010bff6795b3e85c2b5a7768c0594a049ab0 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 4 Jun 2014 16:12:20 -0700 Subject: init/main.c: remove an ifdef Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/proc_fs.h | 4 ++++ init/main.c | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 608e60a74c3c..9d117f61d976 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -44,6 +44,10 @@ extern int remove_proc_subtree(const char *, struct proc_dir_entry *); #else /* CONFIG_PROC_FS */ +static inline void proc_root_init(void) +{ +} + static inline void proc_flush_task(struct task_struct *task) { } diff --git a/init/main.c b/init/main.c index 4de815c0309a..17d47bcdf573 100644 --- a/init/main.c +++ b/init/main.c @@ -629,9 +629,7 @@ asmlinkage __visible void __init start_kernel(void) signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); -#ifdef CONFIG_PROC_FS proc_root_init(); -#endif cgroup_init(); cpuset_init(); taskstats_init_early(); -- cgit v1.2.3 From 31632dbdba85aafc8a6772d578c5c14f84a1fe17 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Fri, 6 Jun 2014 14:35:49 -0700 Subject: drivers/rtc/rtc-cmos.c: drivers/char/rtc.c features for DECstation support This brings in drivers/char/rtc.c functionality required for DECstation and, should the maintainers decide to switch, Alpha systems to use rtc-cmos. Specifically these features are made available: * RTC iomem rather than x86/PCI port I/O mapping, controlled with the RTC_IOMAPPED macro as with the original driver. The DS1287A chip in all DECstation systems is mapped in the host bus address space as a contiguous block of 64 32-bit words of which the least significant byte accesses the RTC chip for both reads and writes. All the address and data window register accesses are made transparently by the chipset glue logic so that the device appears directly mapped on the host bus. * A way to set the size of the address space explicitly with the newly-added `address_space' member of the platform part of the RTC device structure. This avoids the unreliable heuristics that does not work in a setup where the RTC is not explicitly accessed with the usual address and data window register pair. * The ability to use the RTC periodic interrupt as a system clock device, which is implemented by arch/mips/kernel/cevt-ds1287.c for DECstation systems and takes the RTC interrupt away from the RTC driver. Eventually hooking back to the clock device's interrupt handler should be possible for the purpose of the alarm clock and possibly also update-in-progress interrupt, but this is not done by this change. o To avoid interfering with the clock interrupt all the places where the RTC interrupt mask is fiddled with are only executed if and IRQ has been assigned to the RTC driver. o To avoid changing the clock setup Register A is not fiddled with if CMOS_RTC_FLAGS_NOFREQ is set in the newly-added `flags' member of the platform part of the RTC device structure. 
Originally, in drivers/char/rtc.c, this was keyed with the absence of the RTC interrupt, just like the interrupt mask, but there only the periodic interrupt frequency is set, whereas rtc-cmos also sets the divider bits. Therefore a new flag is introduced so that systems where the RTC interrupt is not usable rather than used as a system clock device can fully initialise the RTC. * A small clean-up is made to the IRQ assignment code that makes the IRQ number hardcoded to -1 rather than arbitrary -ENXIO (or whatever error happens to be returned by platform_get_irq) where no IRQ has been assigned to the RTC driver (NO_IRQ might be another candidate, but it looks like this macro has inconsistent or missing definitions and limited use and might therefore be unsafe). Verified to work correctly with a DECstation 5000/240 system. [akpm@linux-foundation.org: fix weird code layout] Signed-off-by: Maciej W. Rozycki Cc: Alessandro Zummo Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rtc/rtc-cmos.c | 85 ++++++++++++++++++++++++++++++++------------- include/linux/mc146818rtc.h | 4 +++ 2 files changed, 64 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 0963c9309c74..b0e4a3eb33c7 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -647,6 +647,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) int retval = 0; unsigned char rtc_control; unsigned address_space; + u32 flags = 0; /* there can be only one ... */ if (cmos_rtc.dev) @@ -660,9 +661,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) * REVISIT non-x86 systems may instead use memory space resources * (needing ioremap etc), not i/o space resources like this ... */ - ports = request_region(ports->start, - resource_size(ports), - driver_name); + if (RTC_IOMAPPED) + ports = request_region(ports->start, resource_size(ports), + driver_name); + else + ports = request_mem_region(ports->start, resource_size(ports), + driver_name); if (!ports) { dev_dbg(dev, "i/o registers already in use\n"); return -EBUSY; @@ -699,6 +703,11 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) * expect CMOS_READ and friends to handle. */ if (info) { + if (info->flags) + flags = info->flags; + if (info->address_space) + address_space = info->address_space; + if (info->rtc_day_alarm && info->rtc_day_alarm < 128) cmos_rtc.day_alrm = info->rtc_day_alarm; if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128) @@ -726,18 +735,21 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) spin_lock_irq(&rtc_lock); - /* force periodic irq to CMOS reset default of 1024Hz; - * - * REVISIT it's been reported that at least one x86_64 ALI mobo - * doesn't use 32KHz here ... for portability we might need to - * do something about other clock frequencies. - */ - cmos_rtc.rtc->irq_freq = 1024; - hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq); - CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT); + if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { + /* force periodic irq to CMOS reset default of 1024Hz; + * + * REVISIT it's been reported that at least one x86_64 ALI + * mobo doesn't use 32KHz here ... for portability we might + * need to do something about other clock frequencies. 
+ */ + cmos_rtc.rtc->irq_freq = 1024; + hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq); + CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT); + } /* disable irqs */ - cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE); + if (is_valid_irq(rtc_irq)) + cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE); rtc_control = CMOS_READ(RTC_CONTROL); @@ -802,14 +814,18 @@ cleanup1: cmos_rtc.dev = NULL; rtc_device_unregister(cmos_rtc.rtc); cleanup0: - release_region(ports->start, resource_size(ports)); + if (RTC_IOMAPPED) + release_region(ports->start, resource_size(ports)); + else + release_mem_region(ports->start, resource_size(ports)); return retval; } -static void cmos_do_shutdown(void) +static void cmos_do_shutdown(int rtc_irq) { spin_lock_irq(&rtc_lock); - cmos_irq_disable(&cmos_rtc, RTC_IRQMASK); + if (is_valid_irq(rtc_irq)) + cmos_irq_disable(&cmos_rtc, RTC_IRQMASK); spin_unlock_irq(&rtc_lock); } @@ -818,7 +834,7 @@ static void __exit cmos_do_remove(struct device *dev) struct cmos_rtc *cmos = dev_get_drvdata(dev); struct resource *ports; - cmos_do_shutdown(); + cmos_do_shutdown(cmos->irq); sysfs_remove_bin_file(&dev->kobj, &nvram); @@ -831,7 +847,10 @@ static void __exit cmos_do_remove(struct device *dev) cmos->rtc = NULL; ports = cmos->iomem; - release_region(ports->start, resource_size(ports)); + if (RTC_IOMAPPED) + release_region(ports->start, resource_size(ports)); + else + release_mem_region(ports->start, resource_size(ports)); cmos->iomem = NULL; cmos->dev = NULL; @@ -1065,10 +1084,13 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp) static void cmos_pnp_shutdown(struct pnp_dev *pnp) { - if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev)) + struct device *dev = &pnp->dev; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + + if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev)) return; - cmos_do_shutdown(); + cmos_do_shutdown(cmos->irq); } static const struct pnp_device_id rtc_ids[] = { @@ -1143,11 +1165,21 @@ static inline void cmos_of_init(struct platform_device *pdev) {} static int __init cmos_platform_probe(struct platform_device *pdev) { + struct resource *resource; + int irq; + cmos_of_init(pdev); cmos_wake_setup(&pdev->dev); - return cmos_do_probe(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_IO, 0), - platform_get_irq(pdev, 0)); + + if (RTC_IOMAPPED) + resource = platform_get_resource(pdev, IORESOURCE_IO, 0); + else + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + irq = -1; + + return cmos_do_probe(&pdev->dev, resource, irq); } static int __exit cmos_platform_remove(struct platform_device *pdev) @@ -1158,10 +1190,13 @@ static int __exit cmos_platform_remove(struct platform_device *pdev) static void cmos_platform_shutdown(struct platform_device *pdev) { - if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pdev->dev)) + struct device *dev = &pdev->dev; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + + if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev)) return; - cmos_do_shutdown(); + cmos_do_shutdown(cmos->irq); } /* work with hotplug and coldplug */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 2f4e957af656..433e0c74d643 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -31,6 +31,10 @@ struct cmos_rtc_board_info { void (*wake_on)(struct device *dev); void (*wake_off)(struct device *dev); + u32 flags; +#define CMOS_RTC_FLAGS_NOFREQ (1 << 0) + int address_space; + u8 rtc_day_alarm; /* zero, or 
register index */ u8 rtc_mon_alarm; /* zero, or register index */ u8 rtc_century; /* zero, or register index */ -- cgit v1.2.3 From 4e52365f279564cef0ddd41db5237f0471381093 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 6 Jun 2014 14:36:42 -0700 Subject: ptrace: fix fork event messages across pid namespaces When tracing a process in another pid namespace, it's important for fork event messages to contain the child's pid as seen from the tracer's pid namespace, not the parent's. Otherwise, the tracer won't be able to correlate the fork event with later SIGTRAP signals it receives from the child. We still risk a race condition if a ptracer from a different pid namespace attaches after we compute the pid_t value. However, sending a bogus fork event message in this unlikely scenario is still a vast improvement over the status quo where we always send bogus fork event messages to debuggers in a different pid namespace than the forking process. Signed-off-by: Matthew Dempsky Acked-by: Oleg Nesterov Cc: Kees Cook Cc: Julien Tinnes Cc: Roland McGrath Cc: Jan Kratochvil Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ptrace.h | 32 ++++++++++++++++++++++++++++++++ kernel/fork.c | 10 +++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 07d0df6bf768..077904c8b70d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -5,6 +5,7 @@ #include /* For struct task_struct. */ #include /* for IS_ERR_VALUE */ #include /* For BUG_ON. */ +#include /* For task_active_pid_ns. */ #include /* @@ -128,6 +129,37 @@ static inline void ptrace_event(int event, unsigned long message) } } +/** + * ptrace_event_pid - possibly stop for a ptrace event notification + * @event: %PTRACE_EVENT_* value to report + * @pid: process identifier for %PTRACE_GETEVENTMSG to return + * + * Check whether @event is enabled and, if so, report @event and @pid + * to the ptrace parent. @pid is reported as the pid_t seen from the + * the ptrace parent's pid namespace. + * + * Called without locks. + */ +static inline void ptrace_event_pid(int event, struct pid *pid) +{ + /* + * FIXME: There's a potential race if a ptracer in a different pid + * namespace than parent attaches between computing message below and + * when we acquire tasklist_lock in ptrace_stop(). If this happens, + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. 
+ */ + unsigned long message = 0; + struct pid_namespace *ns; + + rcu_read_lock(); + ns = task_active_pid_ns(rcu_dereference(current->parent)); + if (ns) + message = pid_nr_ns(pid, ns); + rcu_read_unlock(); + + ptrace_event(event, message); +} + /** * ptrace_init_task - initialize ptrace state for a new child * @child: new child task diff --git a/kernel/fork.c b/kernel/fork.c index 0d53eb0dfb6f..d2799d1fc952 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1606,10 +1606,12 @@ long do_fork(unsigned long clone_flags, */ if (!IS_ERR(p)) { struct completion vfork; + struct pid *pid; trace_sched_process_fork(current, p); - nr = task_pid_vnr(p); + pid = get_task_pid(p, PIDTYPE_PID); + nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); @@ -1624,12 +1626,14 @@ long do_fork(unsigned long clone_flags, /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) - ptrace_event(trace, nr); + ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) - ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); + ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } + + put_pid(pid); } else { nr = PTR_ERR(p); } -- cgit v1.2.3 From 36fac0a214805bd7c8307cad1cde60a7b833266d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Jun 2014 14:36:45 -0700 Subject: signals: kill sigfindinword() It has no users and it doesn't look useful. I do not know why/when it was introduced, I can't even find any user in the git history. Signed-off-by: Oleg Nesterov Acked-by: Geert Uytterhoeven Cc: Peter Zijlstra Cc: Al Viro Cc: David Woodhouse Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mathieu Desnoyers Cc: Richard Weinberger Cc: Steven Rostedt Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m68k/include/asm/signal.h | 9 --------- arch/x86/include/asm/signal.h | 6 ------ include/linux/signal.h | 5 ----- 3 files changed, 20 deletions(-) (limited to 'include/linux') diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 214320b50384..8c8ce5e1ee0e 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -60,15 +60,6 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) __const_sigismember(set,sig) : \ __gen_sigismember(set,sig)) -static inline int sigfindinword(unsigned long word) -{ - asm ("bfffo %1{#0,#0},%0" - : "=d" (word) - : "d" (word & -word) - : "cc"); - return word ^ 31; -} - #endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */ #ifndef __uClinux__ diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 35e67a457182..31eab867e6d3 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -92,12 +92,6 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) ? 
__const_sigismember((set), (sig)) \ : __gen_sigismember((set), (sig))) -static inline int sigfindinword(unsigned long word) -{ - asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); - return word; -} - struct pt_regs; #else /* __i386__ */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 2ac423bdb676..ae744c314630 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -63,11 +63,6 @@ static inline int sigismember(sigset_t *set, int _sig) return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } -static inline int sigfindinword(unsigned long word) -{ - return ffz(~word); -} - #endif /* __HAVE_ARCH_SIG_BITOPS */ static inline int sigisemptyset(sigset_t *set) -- cgit v1.2.3 From 0341729b4b832e753c5e745c6ba0e797f6198be0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Jun 2014 14:36:53 -0700 Subject: signals: mv {dis,}allow_signal() from sched.h/exit.c to signal.[ch] Move the declaration/definition of allow_signal/disallow_signal to signal.h/signal.c. The new place is more logical and allows to use the static helpers in signal.c (see the next changes). While at it, make them return void and remove the valid_signal() check. Nobody checks the returned value, and in-kernel users must not pass the wrong signal number. Signed-off-by: Oleg Nesterov Cc: Peter Zijlstra Cc: Al Viro Cc: David Woodhouse Cc: Frederic Weisbecker Cc: Geert Uytterhoeven Cc: Ingo Molnar Cc: Mathieu Desnoyers Cc: Richard Weinberger Cc: Steven Rostedt Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 3 --- include/linux/signal.h | 2 ++ kernel/exit.c | 39 --------------------------------------- kernel/signal.c | 29 +++++++++++++++++++++++++++++ 4 files changed, 31 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 8fcd0e6098d9..ea74596014a2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2414,9 +2414,6 @@ extern void flush_itimer_signals(void); extern void do_group_exit(int); -extern int allow_signal(int); -extern int disallow_signal(int); - extern int do_execve(struct filename *, const char __user * const __user *, const char __user * const __user *); diff --git a/include/linux/signal.h b/include/linux/signal.h index ae744c314630..ac83c593f4b9 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -284,6 +284,8 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping); extern void exit_signals(struct task_struct *tsk); +extern void allow_signal(int); +extern void disallow_signal(int); /* * Eventually that'll replace get_signal_to_deliver(); macro for now, diff --git a/kernel/exit.c b/kernel/exit.c index 750c2e594617..e5c4668f1799 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -313,45 +313,6 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) } } -/* - * Let kernel threads use this to say that they allow a certain signal. - * Must not be used if kthread was cloned with CLONE_SIGHAND. - */ -int allow_signal(int sig) -{ - if (!valid_signal(sig) || sig < 1) - return -EINVAL; - - spin_lock_irq(¤t->sighand->siglock); - /* This is only needed for daemonize()'ed kthreads */ - sigdelset(¤t->blocked, sig); - /* - * Kernel threads handle their own signals. 
Let the signal code - * know it'll be handled, so that they don't get converted to - * SIGKILL or just silently dropped. - */ - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - return 0; -} - -EXPORT_SYMBOL(allow_signal); - -int disallow_signal(int sig) -{ - if (!valid_signal(sig) || sig < 1) - return -EINVAL; - - spin_lock_irq(¤t->sighand->siglock); - current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - return 0; -} - -EXPORT_SYMBOL(disallow_signal); - #ifdef CONFIG_MEMCG /* * A task is exiting. If it owned this mm, find a new owner for the mm. diff --git a/kernel/signal.c b/kernel/signal.c index a6d8c3af0ad6..7d6ff8b18509 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3066,6 +3066,35 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, } #endif +/* + * Let kernel threads use this to say that they allow a certain signal. + * Must not be used if kthread was cloned with CLONE_SIGHAND. + */ +void allow_signal(int sig) +{ + spin_lock_irq(¤t->sighand->siglock); + /* This is only needed for daemonize()'ed kthreads */ + sigdelset(¤t->blocked, sig); + /* + * Kernel threads handle their own signals. Let the signal code + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ + current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); +} +EXPORT_SYMBOL(allow_signal); + +void disallow_signal(int sig) +{ + spin_lock_irq(¤t->sighand->siglock); + current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); +} +EXPORT_SYMBOL(disallow_signal); + int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) { struct task_struct *p = current, *t; -- cgit v1.2.3 From b4e74264eb0b03f42097fa70a0766312156244a0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 6 Jun 2014 14:37:00 -0700 Subject: signals: introduce kernel_sigaction() Now that allow_signal() is really trivial we can unify it with disallow_signal(). Add the new helper, kernel_sigaction(), and reimplement allow_signal/disallow_signal as a trivial wrappers. This saves one EXPORT_SYMBOL() and the new helper can have more users. 
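A minimal, hypothetical sketch of how a kernel thread ends up using these wrappers (the thread function, its loop and the timeout below are invented for illustration; only allow_signal(), disallow_signal() and kernel_sigaction() come from this series, and as the changelog notes they must not be used if the kthread was cloned with CLONE_SIGHAND):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/jiffies.h>

/* hypothetical worker: whitelists SIGTERM for the lifetime of its loop */
static int example_thread(void *data)
{
        /* boils down to kernel_sigaction(SIGTERM, (__force __sighandler_t)2) */
        allow_signal(SIGTERM);

        while (!kthread_should_stop()) {
                if (signal_pending(current))
                        break;                  /* told to go away */
                schedule_timeout_interruptible(HZ);
        }

        /* boils down to kernel_sigaction(SIGTERM, SIG_IGN) */
        disallow_signal(SIGTERM);
        return 0;
}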
Signed-off-by: Oleg Nesterov Cc: Peter Zijlstra Cc: Al Viro Cc: David Woodhouse Cc: Frederic Weisbecker Cc: Geert Uytterhoeven Cc: Ingo Molnar Cc: Mathieu Desnoyers Cc: Richard Weinberger Cc: Steven Rostedt Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/signal.h | 18 ++++++++++++++++-- kernel/signal.c | 36 ++++++++++++------------------------ 2 files changed, 28 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index ac83c593f4b9..c9e65360c49a 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -284,8 +284,22 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping); extern void exit_signals(struct task_struct *tsk); -extern void allow_signal(int); -extern void disallow_signal(int); +extern void kernel_sigaction(int, __sighandler_t); + +static inline void allow_signal(int sig) +{ + /* + * Kernel threads handle their own signals. Let the signal code + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ + kernel_sigaction(sig, (__force __sighandler_t)2); +} + +static inline void disallow_signal(int sig) +{ + kernel_sigaction(sig, SIG_IGN); +} /* * Eventually that'll replace get_signal_to_deliver(); macro for now, diff --git a/kernel/signal.c b/kernel/signal.c index 3ec405132c79..a4077e90f19f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3067,37 +3067,25 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, #endif /* - * Let kernel threads use this to say that they allow a certain signal. - * Must not be used if kthread was cloned with CLONE_SIGHAND. + * For kthreads only, must not be used if cloned with CLONE_SIGHAND */ -void allow_signal(int sig) +void kernel_sigaction(int sig, __sighandler_t action) { - /* - * Kernel threads handle their own signals. Let the signal code - * know it'll be handled, so that they don't get converted to - * SIGKILL or just silently dropped. - */ spin_lock_irq(¤t->sighand->siglock); - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; - spin_unlock_irq(¤t->sighand->siglock); -} -EXPORT_SYMBOL(allow_signal); + current->sighand->action[sig - 1].sa.sa_handler = action; + if (action == SIG_IGN) { + sigset_t mask; -void disallow_signal(int sig) -{ - sigset_t mask; + sigemptyset(&mask); + sigaddset(&mask, sig); - sigemptyset(&mask); - sigaddset(&mask, sig); - - spin_lock_irq(¤t->sighand->siglock); - current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; - flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); - flush_sigqueue_mask(&mask, ¤t->pending); - recalc_sigpending(); + flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); + flush_sigqueue_mask(&mask, ¤t->pending); + recalc_sigpending(); + } spin_unlock_irq(¤t->sighand->siglock); } -EXPORT_SYMBOL(disallow_signal); +EXPORT_SYMBOL(kernel_sigaction); int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) { -- cgit v1.2.3 From dcbff5d1effbbd52be1ed9f2efb6c8d0445ad188 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 6 Jun 2014 14:37:15 -0700 Subject: idr: reorder the fields idr_layer->layer is always accessed in read path, move it in the front. idr_layer->bitmap is moved on the bottom. And rcu_head shares with bitmap due to they do not be accessed at the same time. 
idr->id_free/id_free_cnt/lock are free list fields, and moved to the bottom. They will be removed in near future. Signed-off-by: Lai Jiangshan Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/idr.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/idr.h b/include/linux/idr.h index 6af3400b9b2f..013fd9bc4cb6 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -29,21 +29,24 @@ struct idr_layer { int prefix; /* the ID prefix of this idr_layer */ - DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */ + int layer; /* distance from leaf */ struct idr_layer __rcu *ary[1< Date: Fri, 6 Jun 2014 14:37:42 -0700 Subject: ipc/shm.c: increase the defaults for SHMALL, SHMMAX System V shared memory a) can be abused to trigger out-of-memory conditions and the standard measures against out-of-memory do not work: - it is not possible to use setrlimit to limit the size of shm segments. - segments can exist without association with any processes, thus the oom-killer is unable to free that memory. b) is typically used for shared information - today often multiple GB. (e.g. database shared buffers) The current default is a maximum segment size of 32 MB and a maximum total size of 8 GB. This is often too much for a) and not enough for b), which means that lots of users must change the defaults. This patch increases the default limits (nearly) to the maximum, which is perfect for case b). The defaults are used after boot and as the initial value for each new namespace. Admins/distros that need a protection against a) should reduce the limits and/or enable shm_rmid_forced. Unix has historically required setting these limits for shared memory, and Linux inherited such behavior. The consequence of this is added complexity for users and administrators. One very common example are Database setup/installation documents and scripts, where users must manually calculate the values for these limits. This also requires (some) knowledge of how the underlying memory management works, thus causing, in many occasions, the limits to just be flat out wrong. Disabling these limits sooner could have saved companies a lot of time, headaches and money for support. But it's never too late, simplify users life now. Further notes: - The patch only changes default, overrides behave as before: # sysctl kernel.shmall=33554432 would recreate the previous limit for SHMMAX (for the current namespace). - Disabling sysv shm allocation is possible with: # sysctl kernel.shmall=0 (not a new feature, also per-namespace) - The limits are intentionally set to a value slightly less than ULONG_MAX, to avoid triggering overflows in user space apps. 
[not unreasonable, see http://marc.info/?l=linux-mm&m=139638334330127] Signed-off-by: Manfred Spraul Signed-off-by: Davidlohr Bueso Reported-by: Davidlohr Bueso Acked-by: Michael Kerrisk Acked-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shm.h | 3 +-- include/uapi/linux/shm.h | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/shm.h b/include/linux/shm.h index 1e2cd2e6b540..57d77709fbe2 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -3,9 +3,8 @@ #include #include - -#define SHMALL (SHMMAX/PAGE_SIZE*(SHMMNI/16)) /* max shm system wide (pages) */ #include + struct shmid_kernel /* private to the kernel */ { struct kern_ipc_perm shm_perm; diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h index 78b69413f582..74e786de6f4e 100644 --- a/include/uapi/linux/shm.h +++ b/include/uapi/linux/shm.h @@ -9,15 +9,13 @@ /* * SHMMAX, SHMMNI and SHMALL are upper limits are defaults which can - * be increased by sysctl + * be modified by sysctl. */ -#define SHMMAX 0x2000000 /* max shared seg size (bytes) */ #define SHMMIN 1 /* min shared seg size (bytes) */ #define SHMMNI 4096 /* max num of segs system wide */ -#ifndef __KERNEL__ -#define SHMALL (SHMMAX/getpagesize()*(SHMMNI/16)) -#endif +#define SHMMAX (ULONG_MAX - (1L<<24)) /* max shared seg size (bytes) */ +#define SHMALL (ULONG_MAX - (1L<<24)) /* max shm system wide (pages) */ #define SHMSEG SHMMNI /* max shared segs per process */ -- cgit v1.2.3 From d6f50c95e0e44fa722852ae24aa51d4b7f0d56ed Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 6 Jun 2014 14:38:06 -0700 Subject: key: convert use of typedef ctl_table to struct ctl_table This typedef is unnecessary and should just be removed. Signed-off-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/key.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/key.h b/include/linux/key.h index 80d677483e31..3ae45f09589b 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -332,7 +332,7 @@ do { \ } while (0) #ifdef CONFIG_SYSCTL -extern ctl_table key_sysctls[]; +extern struct ctl_table key_sysctls[]; #endif /* * the userspace interface -- cgit v1.2.3 From ffe2c748e283c5dc1b9b9ac116299dbfc11a609b Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 6 Jun 2014 14:38:17 -0700 Subject: mm: introduce kmemleak_update_trace() The memory allocation stack trace is not always useful for debugging a memory leak (e.g. radix_tree_preload). This function, when called, updates the stack trace for an already allocated object. 
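A hypothetical sketch of the preload pattern the changelog alludes to (the pool and function names below are invented for illustration; only kmemleak_update_trace() is from this patch): objects are allocated ahead of time at a generic preload site, and the allocation trace is re-recorded when an object is actually handed out, so a later leak report points at the real consumer.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kmemleak.h>

#define EXAMPLE_POOL_SIZE 4

static void *example_pool[EXAMPLE_POOL_SIZE];   /* hypothetical preloaded objects */
static int example_nr;

/* fill the pool early; leaks reported now would all point here */
static int example_preload(gfp_t gfp)
{
        while (example_nr < EXAMPLE_POOL_SIZE) {
                void *obj = kmalloc(128, gfp);

                if (!obj)
                        return -ENOMEM;
                example_pool[example_nr++] = obj;
        }
        return 0;
}

/* hand a preloaded object to the real consumer */
static void *example_get(void)
{
        void *obj;

        if (!example_nr)
                return NULL;
        obj = example_pool[--example_nr];
        /* re-record the stack trace here instead of at example_preload() */
        kmemleak_update_trace(obj);
        return obj;
}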
Signed-off-by: Catalin Marinas Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kmemleak.txt | 1 + include/linux/kmemleak.h | 4 ++++ mm/kmemleak.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) (limited to 'include/linux') diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt index a7563ec4ea7b..b772418bf064 100644 --- a/Documentation/kmemleak.txt +++ b/Documentation/kmemleak.txt @@ -142,6 +142,7 @@ kmemleak_alloc_percpu - notify of a percpu memory block allocation kmemleak_free - notify of a memory block freeing kmemleak_free_part - notify of a partial memory block freeing kmemleak_free_percpu - notify of a percpu memory block freeing +kmemleak_update_trace - update object allocation stack trace kmemleak_not_leak - mark an object as not a leak kmemleak_ignore - do not scan or report an object as leak kmemleak_scan_area - add scan areas inside a memory block diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 5bb424659c04..057e95971014 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -30,6 +30,7 @@ extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; extern void kmemleak_free(const void *ptr) __ref; extern void kmemleak_free_part(const void *ptr, size_t size) __ref; extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; +extern void kmemleak_update_trace(const void *ptr) __ref; extern void kmemleak_not_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; @@ -83,6 +84,9 @@ static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) static inline void kmemleak_free_percpu(const void __percpu *ptr) { } +static inline void kmemleak_update_trace(const void *ptr) +{ +} static inline void kmemleak_not_leak(const void *ptr) { } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5d4aec44982e..3cda50c1e394 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -989,6 +989,40 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr) } EXPORT_SYMBOL_GPL(kmemleak_free_percpu); +/** + * kmemleak_update_trace - update object allocation stack trace + * @ptr: pointer to beginning of the object + * + * Override the object allocation stack trace for cases where the actual + * allocation place is not always useful. + */ +void __ref kmemleak_update_trace(const void *ptr) +{ + struct kmemleak_object *object; + unsigned long flags; + + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr)) + return; + + object = find_and_get_object((unsigned long)ptr, 1); + if (!object) { +#ifdef DEBUG + kmemleak_warn("Updating stack trace for unknown object at %p\n", + ptr); +#endif + return; + } + + spin_lock_irqsave(&object->lock, flags); + object->trace_len = __save_stack_trace(object->trace); + spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); +} +EXPORT_SYMBOL(kmemleak_update_trace); + /** * kmemleak_not_leak - mark an allocated object as false positive * @ptr: pointer to beginning of the object -- cgit v1.2.3 From ae022622ae9447bd70e59db7c91efa25c99a90d5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 6 Jun 2014 14:38:31 -0700 Subject: idle: remove cpu_idle() forward declarations After all architectures were converted to the generic idle framework, commit d190e8195b90 ("idle: Remove GENERIC_IDLE_LOOP config switch") removed the last caller of cpu_idle(). 
The forward declarations in header files were forgotten. Signed-off-by: Geert Uytterhoeven Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpu.h | 1 - include/linux/smp.h | 2 -- 2 files changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 81887120395c..95978ad7fcdd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -256,7 +256,6 @@ enum cpuhp_state { }; void cpu_startup_entry(enum cpuhp_state state); -void cpu_idle(void); void cpu_idle_poll_ctrl(bool enable); diff --git a/include/linux/smp.h b/include/linux/smp.h index 633f5edd7470..34347f26be9b 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -13,8 +13,6 @@ #include #include -extern void cpu_idle(void); - typedef void (*smp_call_func_t)(void *info); struct call_single_data { struct llist_node llist; -- cgit v1.2.3