Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/Makefile  1
-rw-r--r--  drivers/acpi/acpica/achware.h  3
-rw-r--r--  drivers/acpi/acpica/aclocal.h  23
-rw-r--r--  drivers/acpi/acpica/acmacros.h  29
-rw-r--r--  drivers/acpi/acpica/dswload.c  14
-rw-r--r--  drivers/acpi/acpica/dswload2.c  14
-rw-r--r--  drivers/acpi/acpica/evgpe.c  24
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c  3
-rw-r--r--  drivers/acpi/acpica/hwgpe.c  15
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c  1
-rw-r--r--  drivers/acpi/acpica/nsdump.c  2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c  20
-rw-r--r--  drivers/acpi/acpica/tbxface.c  41
-rw-r--r--  drivers/acpi/acpica/utosi.c  1
-rw-r--r--  drivers/acpi/acpica/utxface.c  354
-rw-r--r--  drivers/acpi/acpica/utxfinit.c  317
-rw-r--r--  drivers/acpi/button.c  13
-rw-r--r--  drivers/acpi/fan.c  22
-rw-r--r--  drivers/acpi/glue.c  135
-rw-r--r--  drivers/acpi/hed.c  20
-rw-r--r--  drivers/acpi/proc.c  57
-rw-r--r--  drivers/acpi/sbshc.c  18
-rw-r--r--  drivers/acpi/scan.c  2
-rw-r--r--  drivers/acpi/utils.c  11
-rw-r--r--  drivers/block/nvme.c  153
-rw-r--r--  drivers/block/rbd.c  7
-rw-r--r--  drivers/edac/i3200_edac.c  2
-rw-r--r--  drivers/edac/i5000_edac.c  4
-rw-r--r--  drivers/edac/sb_edac.c  7
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c  3
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  2
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c  14
-rw-r--r--  drivers/idle/intel_idle.c  1
-rw-r--r--  drivers/input/misc/atlas_btns.c  17
-rw-r--r--  drivers/iommu/amd_iommu.c  6
-rw-r--r--  drivers/md/dm-mpath.c  11
-rw-r--r--  drivers/md/dm-table.c  61
-rw-r--r--  drivers/md/dm-thin.c  135
-rw-r--r--  drivers/md/dm-verity.c  8
-rw-r--r--  drivers/md/dm.c  71
-rw-r--r--  drivers/md/dm.h  1
-rw-r--r--  drivers/md/raid10.c  8
-rw-r--r--  drivers/md/raid5.c  1
-rw-r--r--  drivers/mtd/mtdchar.c  48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c  2
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c  4
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c  4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c  4
-rw-r--r--  drivers/net/phy/bcm87xx.c  2
-rw-r--r--  drivers/net/phy/micrel.c  45
-rw-r--r--  drivers/net/phy/smsc.c  28
-rw-r--r--  drivers/net/ppp/pppoe.c  2
-rw-r--r--  drivers/net/team/team.c  44
-rw-r--r--  drivers/net/usb/smsc75xx.c  1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c  1
-rw-r--r--  drivers/platform/x86/hp_accel.c  25
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c  14
-rw-r--r--  drivers/platform/x86/topstar-laptop.c  22
-rw-r--r--  drivers/platform/x86/toshiba_bluetooth.c  22
-rw-r--r--  drivers/platform/x86/xo15-ebook.c  14
-rw-r--r--  drivers/pnp/pnpacpi/core.c  7
-rw-r--r--  drivers/sh/pfc/pinctrl.c  2
-rw-r--r--  drivers/usb/core/devices.c  2
-rw-r--r--  drivers/usb/core/hcd.c  6
-rw-r--r--  drivers/usb/core/usb-acpi.c  5
-rw-r--r--  drivers/usb/host/ohci-at91.c  3
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c  76
72 files changed, 1231 insertions, 822 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 0a1b3435f920..7f1d40797e80 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -158,5 +158,6 @@ acpi-y += \
utresrc.o \
utstate.o \
utxface.o \
+ utxfinit.o \
utxferror.o \
utxfmutex.o
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5de4ec72766d..d902d31abc6c 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -110,8 +110,7 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
/*
* hwgpe - GPE support
*/
-u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
- struct acpi_gpe_register_info *gpe_register_info);
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index cc80fe10e8ea..c816ee675094 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -707,15 +707,18 @@ union acpi_parse_value {
u8 disasm_opcode; /* Subtype used for disassembly */\
char aml_op_name[16]) /* Op name (debug only) */
-#define ACPI_DASM_BUFFER 0x00
-#define ACPI_DASM_RESOURCE 0x01
-#define ACPI_DASM_STRING 0x02
-#define ACPI_DASM_UNICODE 0x03
-#define ACPI_DASM_EISAID 0x04
-#define ACPI_DASM_MATCHOP 0x05
-#define ACPI_DASM_LNOT_PREFIX 0x06
-#define ACPI_DASM_LNOT_SUFFIX 0x07
-#define ACPI_DASM_IGNORE 0x08
+/* Flags for disasm_flags field above */
+
+#define ACPI_DASM_BUFFER 0x00 /* Buffer is a simple data buffer */
+#define ACPI_DASM_RESOURCE 0x01 /* Buffer is a Resource Descriptor */
+#define ACPI_DASM_STRING 0x02 /* Buffer is a ASCII string */
+#define ACPI_DASM_UNICODE 0x03 /* Buffer is a Unicode string */
+#define ACPI_DASM_PLD_METHOD 0x04 /* Buffer is a _PLD method bit-packed buffer */
+#define ACPI_DASM_EISAID 0x05 /* Integer is an EISAID */
+#define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */
+#define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_IGNORE 0x09 /* Not used at this time */
/*
* Generic operation (for example: If, While, Store)
@@ -932,6 +935,7 @@ struct acpi_bit_register_info {
#define ACPI_OSI_WIN_VISTA_SP1 0x09
#define ACPI_OSI_WIN_VISTA_SP2 0x0A
#define ACPI_OSI_WIN_7 0x0B
+#define ACPI_OSI_WIN_8 0x0C
#define ACPI_ALWAYS_ILLEGAL 0x00
@@ -1024,6 +1028,7 @@ struct acpi_port_info {
****************************************************************************/
struct acpi_db_method_info {
+ acpi_handle method;
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
acpi_thread_id *threads;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 832b6198652e..a7f68c47f517 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -277,10 +277,33 @@
/* Bitfields within ACPI registers */
-#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask)
-#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) \
+ ((val << pos) & mask)
-#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
+#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) \
+ reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+
+#define ACPI_INSERT_BITS(target, mask, source) \
+ target = ((target & (~(mask))) | (source & mask))
+
+/* Generic bitfield macros and masks */
+
+#define ACPI_GET_BITS(source_ptr, position, mask) \
+ ((*source_ptr >> position) & mask)
+
+#define ACPI_SET_BITS(target_ptr, position, mask, value) \
+ (*target_ptr |= ((value & mask) << position))
+
+#define ACPI_1BIT_MASK 0x00000001
+#define ACPI_2BIT_MASK 0x00000003
+#define ACPI_3BIT_MASK 0x00000007
+#define ACPI_4BIT_MASK 0x0000000F
+#define ACPI_5BIT_MASK 0x0000001F
+#define ACPI_6BIT_MASK 0x0000003F
+#define ACPI_7BIT_MASK 0x0000007F
+#define ACPI_8BIT_MASK 0x000000FF
+#define ACPI_16BIT_MASK 0x0000FFFF
+#define ACPI_24BIT_MASK 0x00FFFFFF
/*
* An object of type struct acpi_namespace_node can appear in some contexts
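Note: the new generic ACPI_GET_BITS()/ACPI_SET_BITS() helpers and the ACPI_*BIT_MASK constants above are what the _PLD decode macros used later in this series are built on. A minimal sketch of how a caller extracts and re-inserts a bit-packed field (field position and values are illustrative, not from this patch):

/* Hypothetical example: a 4-bit field at bit position 3 of a packed dword */
u32 dword = 0x000000A8;
u32 value;

value = ACPI_GET_BITS(&dword, 3, ACPI_4BIT_MASK);       /* value == 0x5 */

dword = 0;
ACPI_SET_BITS(&dword, 3, ACPI_4BIT_MASK, value);        /* dword == 0x28 */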
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 552aa3a50c84..557510084c7a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -230,6 +230,20 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
+ case ACPI_TYPE_METHOD:
+
+ /*
+ * Allow scope change to root during execution of module-level
+ * code. Root is typed METHOD during this time.
+ */
+ if ((node == acpi_gbl_root_node) &&
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ break;
+ }
+
+ /*lint -fallthrough */
+
default:
/* All other types are an error */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index ae7147724763..89c0114210c0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -230,6 +230,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
+ case ACPI_TYPE_METHOD:
+
+ /*
+ * Allow scope change to root during execution of module-level
+ * code. Root is typed METHOD during this time.
+ */
+ if ((node == acpi_gbl_root_node) &&
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ break;
+ }
+
+ /*lint -fallthrough */
+
default:
/* All other types are an error */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index afbd5cb391f6..ef0193d74b5d 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -80,8 +80,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* Clear the run bit up front */
@@ -379,6 +378,18 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
*/
if (!(gpe_register_info->enable_for_run |
gpe_register_info->enable_for_wake)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Ignore disabled registers for GPE%02X-GPE%02X: "
+ "RunEnable=%02X, WakeEnable=%02X\n",
+ gpe_register_info->
+ base_gpe_number,
+ gpe_register_info->
+ base_gpe_number +
+ (ACPI_GPE_REGISTER_WIDTH - 1),
+ gpe_register_info->
+ enable_for_run,
+ gpe_register_info->
+ enable_for_wake));
continue;
}
@@ -401,9 +412,14 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
}
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
- "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
+ "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, "
+ "RunEnable=%02X, WakeEnable=%02X\n",
gpe_register_info->base_gpe_number,
- status_reg, enable_reg));
+ gpe_register_info->base_gpe_number +
+ (ACPI_GPE_REGISTER_WIDTH - 1),
+ status_reg, enable_reg,
+ gpe_register_info->enable_for_run,
+ gpe_register_info->enable_for_wake));
/* Check if there is anything active at all in this register */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 6affbdb4b88c..87c5f2332260 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -357,8 +357,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 ac
goto unlock_and_exit;
}
- register_bit =
- acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* Perform the action */
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 25bd28c4ae8d..db4076580e2b 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -60,7 +60,6 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
- * gpe_register_info - Info block for the GPE register
*
* RETURN: Register mask with a one in the GPE bit position
*
@@ -69,11 +68,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
-u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
- struct acpi_gpe_register_info *gpe_register_info)
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
return (u32)1 << (gpe_event_info->gpe_number -
- gpe_register_info->base_gpe_number);
+ gpe_event_info->register_info->base_gpe_number);
}
/******************************************************************************
@@ -115,8 +113,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Set or clear just the bit that corresponds to this GPE */
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
switch (action) {
case ACPI_GPE_CONDITIONAL_ENABLE:
@@ -178,8 +175,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
- register_bit =
- acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
status = acpi_hw_write(register_bit,
&gpe_register_info->status_address);
@@ -222,8 +218,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* Get the register bitmask for this GPE */
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* GPE currently enabled? (enabled for runtime?) */
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 1f165a750ae2..0ff1ecea5c3a 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -381,7 +381,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* FUNCTION: acpi_leave_sleep_state_prep
*
* PARAMETERS: sleep_state - Which sleep state we are exiting
- * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7ee4e6aeb0a2..2526aaf945ee 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -264,7 +264,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
switch (type) {
case ACPI_TYPE_PROCESSOR:
- acpi_os_printf("ID %X Len %.4X Addr %p\n",
+ acpi_os_printf("ID %02X Len %02X Addr %p\n",
obj_desc->processor.proc_id,
obj_desc->processor.length,
ACPI_CAST_PTR(void,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 74f97d74db1c..70f9d787c82c 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -350,6 +350,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
+ u32 table_count;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -363,8 +364,13 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- max_table_count +
+ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ table_count = acpi_gbl_root_table_list.max_table_count;
+ } else {
+ table_count = acpi_gbl_root_table_list.current_table_count;
+ }
+
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -377,8 +383,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.
- max_table_count * sizeof(struct acpi_table_desc));
+ (acpi_size) table_count *
+ sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -386,9 +392,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count +=
- ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count =
+ table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 29e51bc01383..21101262e47a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -159,14 +159,12 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
* DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
* root list from the previously provided scratch area. Should
* be called once dynamic memory allocation is available in the
- * kernel
+ * kernel.
*
******************************************************************************/
acpi_status acpi_reallocate_root_table(void)
{
- struct acpi_table_desc *tables;
- acpi_size new_size;
- acpi_size current_size;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -178,39 +176,10 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- /*
- * Get the current size of the root table and add the default
- * increment to create the new table size.
- */
- current_size = (acpi_size)
- acpi_gbl_root_table_list.current_table_count *
- sizeof(struct acpi_table_desc);
-
- new_size = current_size +
- (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
-
- /* Create new array and copy the old array */
-
- tables = ACPI_ALLOCATE_ZEROED(new_size);
- if (!tables) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
-
- /*
- * Update the root table descriptor. The new size will be the current
- * number of tables plus the increment, independent of the reserved
- * size of the original table list.
- */
- acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- acpi_gbl_root_table_list.current_table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags =
- ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
-
- return_ACPI_STATUS(AE_OK);
+ status = acpi_tb_resize_root_table_list();
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 34ef0bd7e4b4..676285d6116d 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
+ {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 534179f1177b..b09632b4f5b3 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: utxface - External interfaces for "global" ACPI functions
+ * Module Name: utxface - External interfaces, miscellaneous utility functions
*
*****************************************************************************/
@@ -53,271 +53,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
-#ifndef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION: acpi_initialize_subsystem
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initializes all global variables. This is the first function
- * called, so any early initialization belongs here.
- *
- ******************************************************************************/
-acpi_status __init acpi_initialize_subsystem(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
-
- acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
- ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
-
- /* Initialize the OS-Dependent layer */
-
- status = acpi_os_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* Initialize all globals used by the subsystem */
-
- status = acpi_ut_init_globals();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During initialization of globals"));
- return_ACPI_STATUS(status);
- }
-
- /* Create the default mutex objects */
-
- status = acpi_ut_mutex_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Global Mutex creation"));
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the namespace manager and
- * the root of the namespace tree
- */
- status = acpi_ns_root_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Namespace initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* Initialize the global OSI interfaces list with the static names */
-
- status = acpi_ut_initialize_interfaces();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During OSI interfaces initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* If configured, initialize the AML debugger */
-
- ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enable_subsystem
- *
- * PARAMETERS: flags - Init/enable Options
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes the subsystem initialization including hardware.
- * Puts system into ACPI mode if it isn't already.
- *
- ******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
-{
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
-
-#if (!ACPI_REDUCED_HARDWARE)
-
- /* Enable ACPI mode */
-
- if (!(flags & ACPI_NO_ACPI_ENABLE)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Going into ACPI mode\n"));
-
- acpi_gbl_original_mode = acpi_hw_get_mode();
-
- status = acpi_enable();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Obtain a permanent mapping for the FACS. This is required for the
- * Global Lock and the Firmware Waking Vector
- */
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
- }
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- /*
- * Install the default op_region handlers. These are installed unless
- * other handlers have already been installed via the
- * install_address_space_handler interface.
- */
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Installing default address space handlers\n"));
-
- status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-#if (!ACPI_REDUCED_HARDWARE)
- /*
- * Initialize ACPI Event handling (Fixed and General Purpose)
- *
- * Note1: We must have the hardware and events initialized before we can
- * execute any control methods safely. Any control method can require
- * ACPI hardware support, so the hardware must be fully initialized before
- * any method execution!
- *
- * Note2: Fixed events are initialized and enabled here. GPEs are
- * initialized, but cannot be enabled until after the hardware is
- * completely initialized (SCI and global_lock activated)
- */
- if (!(flags & ACPI_NO_EVENT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI events\n"));
-
- status = acpi_ev_initialize_events();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Install the SCI handler and Global Lock handler. This completes the
- * hardware initialization.
- */
- if (!(flags & ACPI_NO_HANDLER_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Installing SCI/GL handlers\n"));
-
- status = acpi_ev_install_xrupt_handlers();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_initialize_objects
- *
- * PARAMETERS: flags - Init/enable Options
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes namespace initialization by initializing device
- * objects and executing AML code for Regions, buffers, etc.
- *
- ******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
-{
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(acpi_initialize_objects);
-
- /*
- * Run all _REG methods
- *
- * Note: Any objects accessed by the _REG methods will be automatically
- * initialized, even if they contain executable AML (see the call to
- * acpi_ns_initialize_objects below).
- */
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG OpRegion methods\n"));
-
- status = acpi_ev_initialize_op_regions();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Execute any module-level code that was detected during the table load
- * phase. Although illegal since ACPI 2.0, there are many machines that
- * contain this type of code. Each block of detected executable AML code
- * outside of any control method is wrapped with a temporary control
- * method object and placed on a global list. The methods on this list
- * are executed below.
- */
- acpi_ns_exec_module_code_list();
-
- /*
- * Initialize the objects that remain uninitialized. This runs the
- * executable AML that may be part of the declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- */
- if (!(flags & ACPI_NO_OBJECT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Completing Initialization of ACPI Objects\n"));
-
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Initialize all device objects in the namespace. This runs the device
- * _STA and _INI methods.
- */
- if (!(flags & ACPI_NO_DEVICE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI Devices\n"));
-
- status = acpi_ns_initialize_devices();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Empty the caches (delete the cached objects) on the assumption that
- * the table load filled them up more than they will be at runtime --
- * thus wasting non-paged memory.
- */
- status = acpi_purge_cached_objects();
-
- acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
-
-#endif
/*******************************************************************************
*
* FUNCTION: acpi_terminate
@@ -683,3 +418,90 @@ acpi_check_address_range(acpi_adr_space_type space_id,
ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_decode_pld_buffer
+ *
+ * PARAMETERS: in_buffer - Buffer returned by _PLD method
+ * length - Length of the in_buffer
+ * return_buffer - Where the decode buffer is returned
+ *
+ * RETURN: Status and the decoded _PLD buffer. User must deallocate
+ * the buffer via ACPI_FREE.
+ *
+ * DESCRIPTION: Decode the bit-packed buffer returned by the _PLD method into
+ * a local struct that is much more useful to an ACPI driver.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_decode_pld_buffer(u8 *in_buffer,
+ acpi_size length, struct acpi_pld_info ** return_buffer)
+{
+ struct acpi_pld_info *pld_info;
+ u32 *buffer = ACPI_CAST_PTR(u32, in_buffer);
+ u32 dword;
+
+ /* Parameter validation */
+
+ if (!in_buffer || !return_buffer || (length < 16)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ pld_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pld_info));
+ if (!pld_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* First 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[0]);
+ pld_info->revision = ACPI_PLD_GET_REVISION(&dword);
+ pld_info->ignore_color = ACPI_PLD_GET_IGNORE_COLOR(&dword);
+ pld_info->color = ACPI_PLD_GET_COLOR(&dword);
+
+ /* Second 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[1]);
+ pld_info->width = ACPI_PLD_GET_WIDTH(&dword);
+ pld_info->height = ACPI_PLD_GET_HEIGHT(&dword);
+
+ /* Third 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[2]);
+ pld_info->user_visible = ACPI_PLD_GET_USER_VISIBLE(&dword);
+ pld_info->dock = ACPI_PLD_GET_DOCK(&dword);
+ pld_info->lid = ACPI_PLD_GET_LID(&dword);
+ pld_info->panel = ACPI_PLD_GET_PANEL(&dword);
+ pld_info->vertical_position = ACPI_PLD_GET_VERTICAL(&dword);
+ pld_info->horizontal_position = ACPI_PLD_GET_HORIZONTAL(&dword);
+ pld_info->shape = ACPI_PLD_GET_SHAPE(&dword);
+ pld_info->group_orientation = ACPI_PLD_GET_ORIENTATION(&dword);
+ pld_info->group_token = ACPI_PLD_GET_TOKEN(&dword);
+ pld_info->group_position = ACPI_PLD_GET_POSITION(&dword);
+ pld_info->bay = ACPI_PLD_GET_BAY(&dword);
+
+ /* Fourth 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[3]);
+ pld_info->ejectable = ACPI_PLD_GET_EJECTABLE(&dword);
+ pld_info->ospm_eject_required = ACPI_PLD_GET_OSPM_EJECT(&dword);
+ pld_info->cabinet_number = ACPI_PLD_GET_CABINET(&dword);
+ pld_info->card_cage_number = ACPI_PLD_GET_CARD_CAGE(&dword);
+ pld_info->reference = ACPI_PLD_GET_REFERENCE(&dword);
+ pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
+ pld_info->order = ACPI_PLD_GET_ORDER(&dword);
+
+ if (length >= ACPI_PLD_BUFFER_SIZE) {
+
+ /* Fifth 32-bit DWord (Revision 2 of _PLD) */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[4]);
+ pld_info->vertical_offset = ACPI_PLD_GET_VERT_OFFSET(&dword);
+ pld_info->horizontal_offset = ACPI_PLD_GET_HORIZ_OFFSET(&dword);
+ }
+
+ *return_buffer = pld_info;
+ return (AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_decode_pld_buffer)
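A minimal usage sketch for the new acpi_decode_pld_buffer() interface added above; raw_buffer/raw_length stand in for the bit-packed payload of a _PLD package element and are not part of this patch:

struct acpi_pld_info *pld;
acpi_status status;

status = acpi_decode_pld_buffer(raw_buffer, raw_length, &pld);
if (ACPI_FAILURE(status))
        return status;

/* Use the decoded fields, e.g. pld->panel, pld->vertical_position, ... */

ACPI_FREE(pld);         /* the caller owns the decoded structure */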
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
new file mode 100644
index 000000000000..14f523627a5e
--- /dev/null
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -0,0 +1,317 @@
+/******************************************************************************
+ *
+ * Module Name: utxfinit - External interfaces for ACPICA initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acdebug.h"
+#include "actables.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_subsystem
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initializes all global variables. This is the first function
+ * called, so any early initialization belongs here.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_subsystem(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
+
+ acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
+ ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
+
+ /* Initialize the OS-Dependent layer */
+
+ status = acpi_os_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize all globals used by the subsystem */
+
+ status = acpi_ut_init_globals();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During initialization of globals"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Create the default mutex objects */
+
+ status = acpi_ut_mutex_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Global Mutex creation"));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the namespace manager and
+ * the root of the namespace tree
+ */
+ status = acpi_ns_root_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Namespace initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize the global OSI interfaces list with the static names */
+
+ status = acpi_ut_initialize_interfaces();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During OSI interfaces initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* If configured, initialize the AML debugger */
+
+ ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_subsystem
+ *
+ * PARAMETERS: flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes the subsystem initialization including hardware.
+ * Puts system into ACPI mode if it isn't already.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_subsystem(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+ /* Enable ACPI mode */
+
+ if (!(flags & ACPI_NO_ACPI_ENABLE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Going into ACPI mode\n"));
+
+ acpi_gbl_original_mode = acpi_hw_get_mode();
+
+ status = acpi_enable();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Obtain a permanent mapping for the FACS. This is required for the
+ * Global Lock and the Firmware Waking Vector
+ */
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ /*
+ * Install the default op_region handlers. These are installed unless
+ * other handlers have already been installed via the
+ * install_address_space_handler interface.
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing default address space handlers\n"));
+
+ status = acpi_ev_install_region_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+#if (!ACPI_REDUCED_HARDWARE)
+ /*
+ * Initialize ACPI Event handling (Fixed and General Purpose)
+ *
+ * Note1: We must have the hardware and events initialized before we can
+ * execute any control methods safely. Any control method can require
+ * ACPI hardware support, so the hardware must be fully initialized before
+ * any method execution!
+ *
+ * Note2: Fixed events are initialized and enabled here. GPEs are
+ * initialized, but cannot be enabled until after the hardware is
+ * completely initialized (SCI and global_lock activated) and the various
+ * initialization control methods are run (_REG, _STA, _INI) on the
+ * entire namespace.
+ */
+ if (!(flags & ACPI_NO_EVENT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI events\n"));
+
+ status = acpi_ev_initialize_events();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Install the SCI handler and Global Lock handler. This completes the
+ * hardware initialization.
+ */
+ if (!(flags & ACPI_NO_HANDLER_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing SCI/GL handlers\n"));
+
+ status = acpi_ev_install_xrupt_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_objects
+ *
+ * PARAMETERS: flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes namespace initialization by initializing device
+ * objects and executing AML code for Regions, buffers, etc.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_objects(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_objects);
+
+ /*
+ * Run all _REG methods
+ *
+ * Note: Any objects accessed by the _REG methods will be automatically
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
+
+ status = acpi_ev_initialize_op_regions();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Execute any module-level code that was detected during the table load
+ * phase. Although illegal since ACPI 2.0, there are many machines that
+ * contain this type of code. Each block of detected executable AML code
+ * outside of any control method is wrapped with a temporary control
+ * method object and placed on a global list. The methods on this list
+ * are executed below.
+ */
+ acpi_ns_exec_module_code_list();
+
+ /*
+ * Initialize the objects that remain uninitialized. This runs the
+ * executable AML that may be part of the declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
+
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Initialize all device objects in the namespace. This runs the device
+ * _STA and _INI methods.
+ */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI Devices\n"));
+
+ status = acpi_ns_initialize_devices();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Empty the caches (delete the cached objects) on the assumption that
+ * the table load filled them up more than they will be at runtime --
+ * thus wasting non-paged memory.
+ */
+ status = acpi_purge_cached_objects();
+
+ acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
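For orientation, the new utxfinit.c collects the staged ACPICA initialization entry points. A sketch of the standard host-OS boot ordering that uses them (flag values and the NULL/16 table-array arguments are illustrative; NULL lets ACPICA allocate the root table list itself):

acpi_status status;

status = acpi_initialize_subsystem();                   /* globals, mutexes, namespace root */
if (ACPI_FAILURE(status))
        return status;

status = acpi_initialize_tables(NULL, 16, FALSE);       /* build the root table list */
if (ACPI_FAILURE(status))
        return status;

status = acpi_load_tables();                            /* load DSDT/SSDTs into the namespace */
if (ACPI_FAILURE(status))
        return status;

status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);   /* hardware, events, handlers */
if (ACPI_FAILURE(status))
        return status;

status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); /* _REG, module-level code, _STA/_INI */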
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 314a3b84bbc7..f0d936b65e37 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -450,15 +450,4 @@ static int acpi_button_remove(struct acpi_device *device, int type)
return 0;
}
-static int __init acpi_button_init(void)
-{
- return acpi_bus_register_driver(&acpi_button_driver);
-}
-
-static void __exit acpi_button_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_button_driver);
-}
-
-module_init(acpi_button_init);
-module_exit(acpi_button_exit);
+module_acpi_driver(acpi_button_driver);
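The button, fan, hed and sbshc conversions in this series rely on the module_acpi_driver() helper, which (roughly, as defined in include/acpi/acpi_bus.h around this time) folds the hand-written init/exit pair into:

/* Roughly equivalent to the removed boilerplate: */
#define module_acpi_driver(__acpi_driver) \
        module_driver(__acpi_driver, acpi_bus_register_driver, \
                      acpi_bus_unregister_driver)

The generated module_init/module_exit functions therefore still call acpi_bus_register_driver() and acpi_bus_unregister_driver(); only the wrappers disappear.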
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index bc36a476f1ab..3bd6a54702d6 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -212,24 +212,4 @@ static int acpi_fan_resume(struct device *dev)
}
#endif
-static int __init acpi_fan_init(void)
-{
- int result = 0;
-
- result = acpi_bus_register_driver(&acpi_fan_driver);
- if (result < 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit acpi_fan_exit(void)
-{
-
- acpi_bus_unregister_driver(&acpi_fan_driver);
-
- return;
-}
-
-module_init(acpi_fan_init);
-module_exit(acpi_fan_exit);
+module_acpi_driver(acpi_fan_driver);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 243ee85e4d2e..d1a2d74033e9 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -25,6 +25,8 @@
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
+#define PHYSICAL_NODE_STRING "physical_node"
+
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
@@ -124,84 +126,119 @@ acpi_handle acpi_get_child(acpi_handle parent, u64 address)
EXPORT_SYMBOL(acpi_get_child);
-/* Link ACPI devices with physical devices */
-static void acpi_glue_data_handler(acpi_handle handle,
- void *context)
-{
- /* we provide an empty handler */
-}
-
-/* Note: a success call will increase reference count by one */
-struct device *acpi_get_physical_device(acpi_handle handle)
-{
- acpi_status status;
- struct device *dev;
-
- status = acpi_get_data(handle, acpi_glue_data_handler, (void **)&dev);
- if (ACPI_SUCCESS(status))
- return get_device(dev);
- return NULL;
-}
-
-EXPORT_SYMBOL(acpi_get_physical_device);
-
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
+ struct acpi_device_physical_node *physical_node;
+ char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ int retval = -EINVAL;
if (dev->archdata.acpi_handle) {
dev_warn(dev, "Drivers changed 'acpi_handle'\n");
return -EINVAL;
}
+
get_device(dev);
- status = acpi_attach_data(handle, acpi_glue_data_handler, dev);
- if (ACPI_FAILURE(status)) {
- put_device(dev);
- return -EINVAL;
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (ACPI_FAILURE(status))
+ goto err;
+
+ physical_node = kzalloc(sizeof(struct acpi_device_physical_node),
+ GFP_KERNEL);
+ if (!physical_node) {
+ retval = -ENOMEM;
+ goto err;
}
- dev->archdata.acpi_handle = handle;
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (!ACPI_FAILURE(status)) {
- int ret;
-
- ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
- "firmware_node");
- ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
- "physical_node");
- if (acpi_dev->wakeup.flags.valid)
- device_set_wakeup_capable(dev, true);
+ mutex_lock(&acpi_dev->physical_node_lock);
+ /* allocate physical node id according to physical_node_id_bitmap */
+ physical_node->node_id =
+ find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
+ ACPI_MAX_PHYSICAL_NODE);
+ if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
+ retval = -ENOSPC;
+ mutex_unlock(&acpi_dev->physical_node_lock);
+ goto err;
}
+ set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+ physical_node->dev = dev;
+ list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+ acpi_dev->physical_node_count++;
+ mutex_unlock(&acpi_dev->physical_node_lock);
+
+ dev->archdata.acpi_handle = handle;
+
+ if (!physical_node->node_id)
+ strcpy(physical_node_name, PHYSICAL_NODE_STRING);
+ else
+ sprintf(physical_node_name,
+ "physical_node%d", physical_node->node_id);
+ retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
+ physical_node_name);
+ retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
+ "firmware_node");
+
+ if (acpi_dev->wakeup.flags.valid)
+ device_set_wakeup_capable(dev, true);
+
return 0;
+
+ err:
+ put_device(dev);
+ return retval;
}
static int acpi_unbind_one(struct device *dev)
{
+ struct acpi_device_physical_node *entry;
+ struct acpi_device *acpi_dev;
+ acpi_status status;
+ struct list_head *node, *next;
+
if (!dev->archdata.acpi_handle)
return 0;
- if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
- struct acpi_device *acpi_dev;
- /* acpi_get_physical_device increase refcnt by one */
- put_device(dev);
+ status = acpi_bus_get_device(dev->archdata.acpi_handle,
+ &acpi_dev);
+ if (ACPI_FAILURE(status))
+ goto err;
- if (!acpi_bus_get_device(dev->archdata.acpi_handle,
- &acpi_dev)) {
- sysfs_remove_link(&dev->kobj, "firmware_node");
- sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node");
- }
+ mutex_lock(&acpi_dev->physical_node_lock);
+ list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
+ char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+
+ entry = list_entry(node, struct acpi_device_physical_node,
+ node);
+ if (entry->dev != dev)
+ continue;
+
+ list_del(node);
+ clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
- acpi_detach_data(dev->archdata.acpi_handle,
- acpi_glue_data_handler);
+ acpi_dev->physical_node_count--;
+
+ if (!entry->node_id)
+ strcpy(physical_node_name, PHYSICAL_NODE_STRING);
+ else
+ sprintf(physical_node_name,
+ "physical_node%d", entry->node_id);
+
+ sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
+ sysfs_remove_link(&dev->kobj, "firmware_node");
dev->archdata.acpi_handle = NULL;
/* acpi_bind_one increase refcnt by one */
put_device(dev);
- } else {
- dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
+ kfree(entry);
}
+ mutex_unlock(&acpi_dev->physical_node_lock);
+
return 0;
+
+err:
+ dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
+ return -EINVAL;
}
static int acpi_platform_notify(struct device *dev)
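With acpi_get_physical_device() removed, consumers walk the per-acpi_device physical-node list instead. A hedged sketch of how a caller might obtain a referenced companion struct device, using only the fields introduced above (acpi_dev is an assumed struct acpi_device *):

struct acpi_device_physical_node *entry;
struct device *found = NULL;

mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_entry(entry, &acpi_dev->physical_node_list, node) {
        found = get_device(entry->dev);     /* take a reference under the lock */
        if (found)
                break;
}
mutex_unlock(&acpi_dev->physical_node_lock);

/* ... use 'found', then put_device(found) when done ... */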
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index d0c1967f7597..20a0f2c3ca3b 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -86,25 +86,7 @@ static struct acpi_driver acpi_hed_driver = {
.notify = acpi_hed_notify,
},
};
-
-static int __init acpi_hed_init(void)
-{
- if (acpi_disabled)
- return -ENODEV;
-
- if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit acpi_hed_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_hed_driver);
-}
-
-module_init(acpi_hed_init);
-module_exit(acpi_hed_exit);
+module_acpi_driver(acpi_hed_driver);
ACPI_MODULE_NAME("hed");
MODULE_AUTHOR("Huang Ying");
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 251c7b6273a9..27adb090bb30 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -302,26 +302,41 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
- struct device *ldev;
+ struct acpi_device_physical_node *entry;
if (!dev->wakeup.flags.valid)
continue;
- ldev = acpi_get_physical_device(dev->handle);
- seq_printf(seq, "%s\t S%d\t%c%-8s ",
+ seq_printf(seq, "%s\t S%d\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state,
- dev->wakeup.flags.run_wake ? '*' : ' ',
- (device_may_wakeup(&dev->dev)
- || (ldev && device_may_wakeup(ldev))) ?
- "enabled" : "disabled");
- if (ldev)
- seq_printf(seq, "%s:%s",
- ldev->bus ? ldev->bus->name : "no-bus",
- dev_name(ldev));
- seq_printf(seq, "\n");
- put_device(ldev);
-
+ (u32) dev->wakeup.sleep_state);
+
+ if (!dev->physical_node_count)
+ seq_printf(seq, "%c%-8s\n",
+ dev->wakeup.flags.run_wake ?
+ '*' : ' ', "disabled");
+ else {
+ struct device *ldev;
+ list_for_each_entry(entry, &dev->physical_node_list,
+ node) {
+ ldev = get_device(entry->dev);
+ if (!ldev)
+ continue;
+
+ if (&entry->node !=
+ dev->physical_node_list.next)
+ seq_printf(seq, "\t\t");
+
+ seq_printf(seq, "%c%-8s %s:%s\n",
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+ (device_may_wakeup(&dev->dev) ||
+ (ldev && device_may_wakeup(ldev))) ?
+ "enabled" : "disabled",
+ ldev->bus ? ldev->bus->name :
+ "no-bus", dev_name(ldev));
+ put_device(ldev);
+ }
+ }
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -329,12 +344,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
static void physical_device_enable_wakeup(struct acpi_device *adev)
{
- struct device *dev = acpi_get_physical_device(adev->handle);
+ struct acpi_device_physical_node *entry;
- if (dev && device_can_wakeup(dev)) {
- bool enable = !device_may_wakeup(dev);
- device_set_wakeup_enable(dev, enable);
- }
+ list_for_each_entry(entry,
+ &adev->physical_node_list, node)
+ if (entry->dev && device_can_wakeup(entry->dev)) {
+ bool enable = !device_may_wakeup(entry->dev);
+ device_set_wakeup_enable(entry->dev, enable);
+ }
}
static ssize_t
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index f8d2a472795c..cf6129a8af7c 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -310,23 +310,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
return 0;
}
-static int __init acpi_smb_hc_init(void)
-{
- int result;
-
- result = acpi_bus_register_driver(&acpi_smb_hc_driver);
- if (result < 0)
- return -ENODEV;
- return 0;
-}
-
-static void __exit acpi_smb_hc_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_smb_hc_driver);
-}
-
-module_init(acpi_smb_hc_init);
-module_exit(acpi_smb_hc_exit);
+module_acpi_driver(acpi_smb_hc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Starikovskiy");
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 04302835723d..1fcb8678665c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -531,6 +531,8 @@ static int acpi_device_register(struct acpi_device *device)
INIT_LIST_HEAD(&device->children);
INIT_LIST_HEAD(&device->node);
INIT_LIST_HEAD(&device->wakeup_list);
+ INIT_LIST_HEAD(&device->physical_node_list);
+ mutex_init(&device->physical_node_lock);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
if (!new_bus_id) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3e87c9c538aa..462f7e300363 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -384,7 +384,7 @@ acpi_evaluate_reference(acpi_handle handle,
EXPORT_SYMBOL(acpi_evaluate_reference);
acpi_status
-acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld)
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -400,13 +400,16 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld)
if (!output || output->type != ACPI_TYPE_PACKAGE
|| !output->package.count
|| output->package.elements[0].type != ACPI_TYPE_BUFFER
- || output->package.elements[0].buffer.length > sizeof(*pld)) {
+ || output->package.elements[0].buffer.length < ACPI_PLD_REV1_BUFFER_SIZE) {
status = AE_TYPE;
goto out;
}
- memcpy(pld, output->package.elements[0].buffer.pointer,
- output->package.elements[0].buffer.length);
+ status = acpi_decode_pld_buffer(
+ output->package.elements[0].buffer.pointer,
+ output->package.elements[0].buffer.length,
+ pld);
+
out:
kfree(buffer.pointer);
return status;
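acpi_get_physical_device_location() now returns an allocated struct acpi_pld_info instead of filling a caller-provided struct acpi_pld, so callers change along these lines (sketch only; 'handle' is an assumed acpi_handle and the error value is illustrative):

struct acpi_pld_info *pld;
acpi_status status;

status = acpi_get_physical_device_location(handle, &pld);
if (ACPI_FAILURE(status))
        return -ENODEV;

/* e.g. use pld->group_token / pld->group_position for port matching */

ACPI_FREE(pld);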
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 38a2d0631882..ad16c68c8645 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -79,6 +79,7 @@ struct nvme_dev {
char serial[20];
char model[40];
char firmware_rev[8];
+ u32 max_hw_sectors;
};
/*
@@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
}
static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr)
+ unsigned nsid, dma_addr_t dma_addr)
{
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
+ c.features.nsid = cpu_to_le32(nsid);
c.features.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- c.features.dword11 = cpu_to_le32(dword11);
return nvme_submit_admin_cmd(dev, &c, NULL);
}
@@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
return nvme_submit_admin_cmd(dev, &c, result);
}
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @queue: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ };
+
+ if (timeout && !time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
+static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+{
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+}
+
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock_irq(&nvmeq->q_lock);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
adapter_delete_cq(dev, qid);
}
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
- (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
+ nvme_free_queue_mem(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+ unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+ sizeof(struct nvme_cmd_info));
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
if (!nvmeq)
return NULL;
@@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
- int result;
+ int result = 0;
u32 aqa;
u64 cap;
unsigned long timeout;
@@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
dev->db_stride = NVME_CAP_STRIDE(cap);
- while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+ while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
msleep(100);
if (fatal_signal_pending(current))
- return -EINTR;
+ result = -EINTR;
if (time_after(jiffies, timeout)) {
dev_err(&dev->pci_dev->dev,
"Device not ready; aborting initialisation\n");
- return -ENODEV;
+ result = -ENODEV;
}
}
+ if (result) {
+ nvme_free_queue_mem(nvmeq);
+ return result;
+ }
+
result = queue_request_irq(dev, nvmeq, "nvme admin");
dev->queues[0] = nvmeq;
return result;
@@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
offset = offset_in_page(addr);
count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
err = get_user_pages_fast(addr, count, 1, pages);
if (err < count) {
@@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return status;
}
-static int nvme_user_admin_cmd(struct nvme_ns *ns,
+static int nvme_user_admin_cmd(struct nvme_dev *dev,
struct nvme_admin_cmd __user *ucmd)
{
- struct nvme_dev *dev = ns->dev;
struct nvme_admin_cmd cmd;
struct nvme_command c;
int status, length;
- struct nvme_iod *iod;
+ struct nvme_iod *uninitialized_var(iod);
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
case NVME_IOCTL_ID:
return ns->ns_id;
case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_admin_cmd(ns, (void __user *)arg);
+ return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
default:
@@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = {
.compat_ioctl = nvme_ioctl,
};
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
-
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
- if (!time_after(now, info[cmdid].timeout))
- continue;
- dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-}
-
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data)
spin_lock_irq(&nvmeq->q_lock);
if (nvme_process_cq(nvmeq))
printk("process_cq did something\n");
- nvme_timeout_ios(nvmeq);
+ nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
ns->disk = disk;
lbaf = id->flbas & 0xf;
ns->lba_shift = id->lbaf[lbaf].ds;
+ blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+ if (dev->max_hw_sectors)
+ blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
disk->minors = NVME_MINORS;
@@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
- int result, cpu, i, nr_io_queues, db_bar_size;
+ int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
cpu = cpumask_next(cpu, cpu_online_mask);
}
+ q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+ NVME_Q_DEPTH);
for (i = 0; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
- NVME_Q_DEPTH, i);
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
if (IS_ERR(dev->queues[i + 1]))
return PTR_ERR(dev->queues[i + 1]);
dev->queue_count++;
@@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+ if (ctrl->mdts) {
+ int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+ dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+ }
id_ns = mem;
for (i = 1; i <= nn; i++) {
@@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
list_del(&dev->node);
spin_unlock(&dev_list_lock);
- /* TODO: wait all I/O finished or cancel them */
-
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
@@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_small_pool);
}
-/* XXX: Use an ida or something to let remove / add work correctly */
-static void nvme_set_instance(struct nvme_dev *dev)
+static DEFINE_IDA(nvme_instance_ida);
+
+static int nvme_set_instance(struct nvme_dev *dev)
{
- static int instance;
- dev->instance = instance++;
+ int instance, error;
+
+ do {
+ if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
+ return -ENODEV;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_instance_ida, &instance);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ return -ENODEV;
+
+ dev->instance = instance;
+ return 0;
}
static void nvme_release_instance(struct nvme_dev *dev)
{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_instance_ida, dev->instance);
+ spin_unlock(&dev_list_lock);
}
static int __devinit nvme_probe(struct pci_dev *pdev,
@@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- nvme_set_instance(dev);
+ result = nvme_set_instance(dev);
+ if (result)
+ goto disable;
+
dev->entry[0].vector = pdev->irq;
result = nvme_setup_prp_pools(dev);
@@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = {
static int __init nvme_init(void)
{
- int result = -EBUSY;
+ int result;
nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
- nvme_major = register_blkdev(nvme_major, "nvme");
- if (nvme_major <= 0)
+ result = register_blkdev(nvme_major, "nvme");
+ if (result < 0)
goto kill_kthread;
+ else if (result > 0)
+ nvme_major = result;
result = pci_register_driver(&nvme_driver);
if (result)
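
(For context on the ida_pre_get()/ida_get_new() retry idiom that nvme_set_instance() adopts above: a minimal stand-alone sketch of the same pattern follows. The example_* names and the -ENOMEM return are illustrative assumptions, not part of the patch.)

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_lock);

/* Allocate a unique small integer id; returns the id or a negative errno. */
static int example_alloc_id(void)
{
	int id, err;

	do {
		/* Preallocate outside the lock; returns 0 on allocation failure. */
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&example_lock);
		err = ida_get_new(&example_ida, &id);
		spin_unlock(&example_lock);
	} while (err == -EAGAIN);	/* another caller consumed the preallocation */

	return err ? err : id;
}

/* Release later with ida_remove(&example_ida, id) under the same lock. */
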
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9917943a3572..54a55f03115d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- rbd_get_dev(rbd_dev);
-
- set_device_ro(bdev, rbd_dev->read_only);
-
if ((mode & FMODE_WRITE) && rbd_dev->read_only)
return -EROFS;
+ rbd_get_dev(rbd_dev);
+ set_device_ro(bdev, rbd_dev->read_only);
+
return 0;
}
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 47180a08edad..b6653a6fc5d5 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
for (j = 0; j < nr_channels; j++) {
struct dimm_info *dimm = csrow->channels[j]->dimm;
- dimm->nr_pages = nr_pages / nr_channels;
+ dimm->nr_pages = nr_pages;
dimm->grain = nr_pages << PAGE_SHIFT;
dimm->mtype = MEM_DDR2;
dimm->dtype = DEV_UNKNOWN;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 39c63757c2a1..6a49dd00b81b 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
/* add the number of COLUMN bits */
addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+ /* Dual-rank memories have twice the size */
+ if (dinfo->dual_rank)
+ addrBits++;
+
addrBits += 6; /* add 64 bits per DIMM */
addrBits -= 20; /* divide by 2^^20 */
addrBits -= 3; /* 8 bits per bytes */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f3b1f9fafa4b..5715b7c2c517 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct dimm_info *dimm;
- int i, j, banks, ranks, rows, cols, size, npages;
+ unsigned i, j, banks, ranks, rows, cols, npages;
+ u64 size;
u32 reg;
enum edac_type mode;
enum mem_type mtype;
@@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
cols = numcol(mtr);
/* DDR3 has 8 I/O banks */
- size = (rows * cols * banks * ranks) >> (20 - 3);
+ size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
pvt->sbridge_dev->mc, i, j,
size, npages,
banks, ranks, rows, cols);
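
(Aside: the (u64) cast above is the usual guard against a 32-bit intermediate overflow; the product is forced into 64 bits before the shift. A tiny sketch of the idiom, with a hypothetical helper name:)

#include <linux/types.h>

/*
 * rows * cols * banks * ranks counts 8-byte locations and can exceed
 * 32 bits on large DIMMs; promoting one operand makes the whole
 * product 64-bit.  The shift by (20 - 3) then converts 8-byte
 * locations to MiB.
 */
static u64 dimm_size_mib(unsigned int rows, unsigned int cols,
			 unsigned int banks, unsigned int ranks)
{
	return ((u64)rows * cols * banks * ranks) >> (20 - 3);
}
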
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 8a420f13905e..ed94b4ea72e9 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
{
struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+ __set_gpio_level_p012(group, pin, value);
__set_gpio_dir_p012(group, pin, 0);
return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
{
struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+ __set_gpio_level_p3(group, pin, value);
__set_gpio_dir_p3(group, pin, 0);
return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
int value)
{
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+ __set_gpo_level_p3(group, pin, value);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ff23d88880e5..3ca240b4413d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
return 0;
} else
if (init->class == 0x906e) {
- NV_ERROR(dev, "906e not supported yet\n");
+ NV_DEBUG(dev, "906e not supported yet\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index f704e942372e..f376c39310df 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev)
priv = dev_priv->engine.fb.priv;
nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+ nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 7d85553d518c..cd39eb99f5b1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
static void
nvc0_fifo_isr(struct drm_device *dev)
{
- u32 stat = nv_rd32(dev, 0x002100);
+ u32 mask = nv_rd32(dev, 0x002140);
+ u32 stat = nv_rd32(dev, 0x002100) & mask;
if (stat & 0x00000100) {
NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index e98d144e6eb9..281bece751b6 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
static void
nve0_fifo_isr(struct drm_device *dev)
{
- u32 stat = nv_rd32(dev, 0x002100);
+ u32 mask = nv_rd32(dev, 0x002140);
+ u32 stat = nv_rd32(dev, 0x002100) & mask;
if (stat & 0x00000100) {
NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9ca007..8d9dc44f1f94 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
static int udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct udl_device *udl = connector->dev->dev_private;
+ if (!udl->sku_pixel_limit)
+ return 0;
+
+ if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+ return MODE_VIRTUAL_Y;
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f15e2f1..7e0743358dff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
}
- event = kzalloc(sizeof(event->event), GFP_KERNEL);
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
if (unlikely(event == NULL)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 388cbdc96db7..6aafa3d88ff0 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -426,19 +426,7 @@ static struct acpi_driver acpi_smbus_cmi_driver = {
.remove = acpi_smbus_cmi_remove,
},
};
-
-static int __init acpi_smbus_cmi_init(void)
-{
- return acpi_bus_register_driver(&acpi_smbus_cmi_driver);
-}
-
-static void __exit acpi_smbus_cmi_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_smbus_cmi_driver);
-}
-
-module_init(acpi_smbus_cmi_init);
-module_exit(acpi_smbus_cmi_exit);
+module_acpi_driver(acpi_smbus_cmi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e8726177d103..b0f6b4c8ee14 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -413,6 +413,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2a, idle_cpu_snb),
ICPU(0x2d, idle_cpu_snb),
ICPU(0x3a, idle_cpu_ivb),
+ ICPU(0x3e, idle_cpu_ivb),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 601f7372f9c4..26f13131639a 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -151,22 +151,7 @@ static struct acpi_driver atlas_acpi_driver = {
.remove = atlas_acpi_button_remove,
},
};
-
-static int __init atlas_acpi_init(void)
-{
- if (acpi_disabled)
- return -ENODEV;
-
- return acpi_bus_register_driver(&atlas_acpi_driver);
-}
-
-static void __exit atlas_acpi_exit(void)
-{
- acpi_bus_unregister_driver(&atlas_acpi_driver);
-}
-
-module_init(atlas_acpi_init);
-module_exit(atlas_acpi_exit);
+module_acpi_driver(atlas_acpi_driver);
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b64502dfa9f4..e89daf1b21b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -266,7 +266,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
static int iommu_init_device(struct device *dev)
{
- struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
+ struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
struct iommu_group *group;
u16 alias;
@@ -293,7 +293,9 @@ static int iommu_init_device(struct device *dev)
dev_data->alias_data = alias_data;
dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
- } else
+ }
+
+ if (dma_pdev == NULL)
dma_pdev = pci_dev_get(pdev);
/* Account for quirked devices */
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d8abb90a6c2f..034233eefc82 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct multipath *m = ti->private;
+ struct pgpath *pgpath;
struct block_device *bdev;
fmode_t mode;
unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
if (!m->current_pgpath)
__choose_pgpath(m, 0);
- if (m->current_pgpath) {
- bdev = m->current_pgpath->path.dev->bdev;
- mode = m->current_pgpath->path.dev->mode;
+ pgpath = m->current_pgpath;
+
+ if (pgpath) {
+ bdev = pgpath->path.dev->bdev;
+ mode = pgpath->path.dev->mode;
}
- if (m->queue_io)
+ if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
r = -EAGAIN;
else if (!bdev)
r = -EIO;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aae..100368eb7991 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ unsigned *num_devices = data;
+
+ (*num_devices)++;
+
+ return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+ struct dm_target *uninitialized_var(ti);
+ unsigned i = 0, num_devices = 0;
+
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ if (!ti->type->iterate_devices)
+ return false;
+
+ ti->type->iterate_devices(ti, count_device, &num_devices);
+ if (num_devices)
+ return false;
+ }
+
+ return true;
+}
+
/*
* Establish the new table's queue_limits and validate them.
*/
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
return q && blk_queue_nonrot(q);
}
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+ iterate_devices_callout_fn func)
{
struct dm_target *ti;
unsigned i = 0;
- /* Ensure that all underlying device are non-rotational. */
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+ !ti->type->iterate_devices(ti, func, NULL))
return 0;
}
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
- if (dm_table_is_nonrot(t))
+ /* Ensure that all underlying devices are non-rotational. */
+ if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_set_integrity(t);
/*
+ * Determine whether or not this queue's I/O timings contribute
+ * to the entropy pool. Only request-based targets use this.
+ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+ * have it set.
+ */
+ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+ /*
* QUEUE_FLAG_STACKABLE must be set after all queue settings are
* visible to other CPUs because, once the flag is set, incoming bios
* are processed by request-based dm, which refers to the queue
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af1fc3b2c2ad..c29410af1e22 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,9 +509,9 @@ enum pool_mode {
struct pool_features {
enum pool_mode mode;
- unsigned zero_new_blocks:1;
- unsigned discard_enabled:1;
- unsigned discard_passdown:1;
+ bool zero_new_blocks:1;
+ bool discard_enabled:1;
+ bool discard_passdown:1;
};
struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
struct dm_target_callbacks callbacks;
dm_block_t low_water_blocks;
- struct pool_features pf;
+ struct pool_features requested_pf; /* Features requested during table load */
+ struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
};
/*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
/*----------------------------------------------------------------
* Binding of control targets to a pool object
*--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+ struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+ return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled, verify that the data device
+ * supports discards. Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+ struct pool *pool = pt->pool;
+ struct block_device *data_bdev = pt->data_dev->bdev;
+ struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+ sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+ const char *reason = NULL;
+ char buf[BDEVNAME_SIZE];
+
+ if (!pt->adjusted_pf.discard_passdown)
+ return;
+
+ if (!data_dev_supports_discard(pt))
+ reason = "discard unsupported";
+
+ else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+ reason = "max discard sectors smaller than a block";
+
+ else if (data_limits->discard_granularity > block_size)
+ reason = "discard granularity larger than a block";
+
+ else if (block_size & (data_limits->discard_granularity - 1))
+ reason = "discard granularity not a factor of block size";
+
+ if (reason) {
+ DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+ pt->adjusted_pf.discard_passdown = false;
+ }
+}
+
static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
* We want to make sure that degraded pools are never upgraded.
*/
enum pool_mode old_mode = pool->pf.mode;
- enum pool_mode new_mode = pt->pf.mode;
+ enum pool_mode new_mode = pt->adjusted_pf.mode;
if (old_mode > new_mode)
new_mode = old_mode;
pool->ti = ti;
pool->low_water_blocks = pt->low_water_blocks;
- pool->pf = pt->pf;
- set_pool_mode(pool, new_mode);
+ pool->pf = pt->adjusted_pf;
- /*
- * If discard_passdown was enabled verify that the data device
- * supports discards. Disable discard_passdown if not; otherwise
- * -EOPNOTSUPP will be returned.
- */
- /* FIXME: pull this out into a sep fn. */
- if (pt->pf.discard_passdown) {
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
- if (!q || !blk_queue_discard(q)) {
- char buf[BDEVNAME_SIZE];
- DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
- bdevname(pt->data_dev->bdev, buf));
- pool->pf.discard_passdown = 0;
- }
- }
+ set_pool_mode(pool, new_mode);
return 0;
}
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
static void pool_features_init(struct pool_features *pf)
{
pf->mode = PM_WRITE;
- pf->zero_new_blocks = 1;
- pf->discard_enabled = 1;
- pf->discard_passdown = 1;
+ pf->zero_new_blocks = true;
+ pf->discard_enabled = true;
+ pf->discard_passdown = true;
}
static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
argc--;
if (!strcasecmp(arg_name, "skip_block_zeroing"))
- pf->zero_new_blocks = 0;
+ pf->zero_new_blocks = false;
else if (!strcasecmp(arg_name, "ignore_discard"))
- pf->discard_enabled = 0;
+ pf->discard_enabled = false;
else if (!strcasecmp(arg_name, "no_discard_passdown"))
- pf->discard_passdown = 0;
+ pf->discard_passdown = false;
else if (!strcasecmp(arg_name, "read_only"))
pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->metadata_dev = metadata_dev;
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
- pt->pf = pf;
+ pt->adjusted_pf = pt->requested_pf = pf;
ti->num_flush_requests = 1;
+
/*
* Only need to enable discards if the pool should pass
* them down to the data device. The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
*/
if (pf.discard_enabled && pf.discard_passdown) {
ti->num_discard_requests = 1;
+
/*
* Setting 'discards_supported' circumvents the normal
* stacking of discard limits (this keeps the pool and
* thin devices' discard limits consistent).
*/
ti->discards_supported = true;
+ ti->discard_zeroes_data_unsupported = true;
}
ti->private = pt;
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
(unsigned long)pool->sectors_per_block,
(unsigned long long)pt->low_water_blocks);
- emit_flags(&pt->pf, result, sz, maxlen);
+ emit_flags(&pt->requested_pf, result, sz, maxlen);
break;
}
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
- /*
- * FIXME: these limits may be incompatible with the pool's data device
- */
+ struct pool *pool = pt->pool;
+ struct queue_limits *data_limits;
+
limits->max_discard_sectors = pool->sectors_per_block;
/*
- * This is just a hint, and not enforced. We have to cope with
- * bios that cover a block partially. A discard that spans a block
- * boundary is not sent to this target.
+ * discard_granularity is just a hint, and not enforced.
*/
- limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+ if (pt->adjusted_pf.discard_passdown) {
+ data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+ limits->discard_granularity = data_limits->discard_granularity;
+ } else
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, 0);
blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
- if (pool->pf.discard_enabled)
- set_discard_limits(pool, limits);
+
+ /*
+ * pt->adjusted_pf is a staging area for the actual features to use.
+ * They get transferred to the live pool in bind_control_target()
+ * called from pool_preresume().
+ */
+ if (!pt->adjusted_pf.discard_enabled)
+ return;
+
+ disable_passdown_if_not_supported(pt);
+
+ set_discard_limits(pt, limits);
}
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
return 0;
}
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct thin_c *tc = ti->private;
- struct pool *pool = tc->pool;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
- set_discard_limits(pool, limits);
+ *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
}
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 254d19268ad2..892ae2766aa6 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->hash_dev_block_bits = ffs(num) - 1;
if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
- num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
- (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+ (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+ >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid data blocks";
r = -EINVAL;
goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
- num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
- (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+ (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+ >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid hash start";
r = -EINVAL;
goto bad;
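
(The dm-verity hunks above tighten the overflow test on the user-supplied block counts: the 64-bit value is shifted into a sector_t and the check fails if shifting back does not recover the original value, i.e. if high bits were dropped on a 32-bit sector_t. A stand-alone sketch of the idiom, with hypothetical names:)

#include <linux/types.h>

/*
 * True if 'blocks' converted to sectors (one block == 1 << shift sectors)
 * round-trips through sector_t without losing high bits.
 */
static bool blocks_fit_in_sector_t(unsigned long long blocks, unsigned int shift)
{
	sector_t sectors = (sector_t)(blocks << shift);

	return (sectors >> shift) == blocks;
}
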
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..67ffa391edcf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
- if (mapped && rq_end_io)
- r = rq_end_io(tio->ti, clone, error, &tio->info);
+ if (tio->ti) {
+ rq_end_io = tio->ti->type->rq_end_io;
+
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
+ }
if (r <= 0)
/* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
return requeued;
}
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ struct request *clone;
+
+ blk_start_request(orig);
+ clone = orig->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count by device opener,
+ * because the device may be closed during the request completion
+ * when all bios are completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ return clone;
+}
+
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform the setup that dm_done() requires
+ * before calling dm_kill_unmapped_request()
+ */
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ clone = dm_start_request(md, rq);
+ dm_kill_unmapped_request(clone, -EIO);
+ continue;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
- blk_start_request(rq);
- clone = rq->special;
- atomic_inc(&md->pending[rq_data_dir(clone)]);
+ clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@ delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)
@@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- struct dm_table *map = ERR_PTR(-EINVAL);
+ struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
@@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
if (!dm_suspended_md(md))
goto out;
+ /*
+ * If the new table has no data devices, retain the existing limits.
+ * This helps multipath with queue_if_no_path when all paths disappear:
+ * new I/O is then queued based on these limits until some paths
+ * reappear.
+ */
+ if (dm_table_has_no_data_devices(table)) {
+ live_map = dm_get_live_table(md);
+ if (live_map)
+ limits = md->queue->limits;
+ dm_table_put(live_map);
+ }
+
r = dm_calculate_queue_limits(table, &limits);
if (r) {
map = ERR_PTR(r);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 52eef493d266..6a99fefaa743 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c2eb38f3c51..0138a727c1f3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
do {
int n = conf->copies;
int cnt = 0;
+ int this = first;
while (n--) {
- if (conf->mirrors[first].rdev &&
- first != ignore)
+ if (conf->mirrors[this].rdev &&
+ this != ignore)
cnt++;
- first = (first+1) % geo->raid_disks;
+ this = (this+1) % geo->raid_disks;
}
if (cnt == 0)
return 0;
+ first = (first + geo->near_copies) % geo->raid_disks;
} while (first != 0);
return 1;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7031b865b3a0..0689173fd9f5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
+ spin_lock_init(&nsh->stripe_lock);
list_add(&nsh->lru, &newstripes);
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec573..a6e74514e662 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+ return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+ return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+ pgoff_t pgoff = off >> PAGE_SHIFT;
+ if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+ return -EINVAL;
+ if (off + get_vm_size(vma) - 1 < off)
+ return -EINVAL;
+ vma->vm_pgoff = pgoff;
+ return 0;
+}
+
/*
* set up a mapping for shared memory segments
*/
@@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- unsigned long start;
- unsigned long off;
- u32 len;
+ resource_size_t start, off;
+ unsigned long len, vma_len;
if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
- off = vma->vm_pgoff << PAGE_SHIFT;
+ off = get_vm_offset(vma);
start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
+ vma_len = get_vm_size(vma);
+
+ /* Overflow in off+len? */
+ if (vma_len + off < off)
+ return -EINVAL;
+ /* Does it fit in the mapping? */
+ if (vma_len + off > len)
return -EINVAL;
off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
+ /* Did that overflow? */
+ if (off < start)
+ return -EINVAL;
+ if (set_vm_offset(vma, off) < 0)
+ return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED;
#ifdef pgprot_noncached
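
(The mtdchar rework above is all about making the offset arithmetic wrap-safe in resource_size_t; the recurring test is the unsigned "a + b < a" wrap check. Minimal sketch, helper name hypothetical:)

#include <linux/types.h>

/* True if off + len wraps around the top of resource_size_t. */
static bool range_wraps(resource_size_t off, resource_size_t len)
{
	return off + len < off;
}
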
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 79cebd8525ce..e48312f2305d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
- iounmap(bp->regview);
+ pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index c42bbb16cdae..a688a2ddcfd6 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
octeon_mgmt_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
- if (IS_ERR(p->phydev)) {
- p->phydev = NULL;
+ if (!p->phydev)
return -1;
- }
phy_start_aneg(p->phydev);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index e559dfa06d6a..6fa74d530e44 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
PHY_INTERFACE_MODE_SGMII);
- if (IS_ERR(phydev)) {
+ if (!phydev) {
printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
- return PTR_ERR(phydev);
+ return -ENODEV;
}
mac->phydev = phydev;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b8ead696141e..2a179d087207 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
do {
/* give at least 1ms for firmware to respond */
- msleep(1);
+ mdelay(1);
if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
return QLCNIC_CDRP_RSP_TIMEOUT;
@@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_destroy_tx_ctx(adapter);
/* Allow dma queues to drain after context reset */
- msleep(20);
+ mdelay(20);
}
}
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 2346b38b9837..799789518e87 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void)
ARRAY_SIZE(bcm87xx_driver));
}
module_exit(bcm87xx_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf287e0eb408..2165d5fdb8c0 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -21,6 +21,12 @@
#include <linux/phy.h>
#include <linux/micrel_phy.h>
+/* Operation Mode Strap Override */
+#define MII_KSZPHY_OMSO 0x16
+#define KSZPHY_OMSO_B_CAST_OFF (1 << 9)
+#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1)
+#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0)
+
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
#define KSZPHY_INTCS_JABBER (1 << 15)
@@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ksz8021_config_init(struct phy_device *phydev)
+{
+ const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+ phy_write(phydev, MII_KSZPHY_OMSO, val);
+ return 0;
+}
+
static int ks8051_config_init(struct phy_device *phydev)
{
int regval;
@@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = ks8737_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
- .phy_id = PHY_ID_KS8041,
+ .phy_id = PHY_ID_KSZ8021,
+ .phy_id_mask = 0x00ffffff,
+ .name = "Micrel KSZ8021",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = ksz8021_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ .phy_id = PHY_ID_KSZ8041,
.phy_id_mask = 0x00fffff0,
- .name = "Micrel KS8041",
+ .name = "Micrel KSZ8041",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
- .phy_id = PHY_ID_KS8051,
+ .phy_id = PHY_ID_KSZ8051,
.phy_id_mask = 0x00fffff0,
- .name = "Micrel KS8051",
+ .name = "Micrel KSZ8051",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
- .phy_id = PHY_ID_KS8001,
- .name = "Micrel KS8001 or KS8721",
+ .phy_id = PHY_ID_KSZ8001,
+ .name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -201,10 +227,11 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KS8001, 0x00ffffff },
+ { PHY_ID_KSZ8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
- { PHY_ID_KS8041, 0x00fffff0 },
- { PHY_ID_KS8051, 0x00fffff0 },
+ { PHY_ID_KSZ8021, 0x00ffffff },
+ { PHY_ID_KSZ8041, 0x00fffff0 },
+ { PHY_ID_KSZ8051, 0x00fffff0 },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 6d6192316b30..88e3991464e7 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -56,6 +56,32 @@ static int smsc_phy_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt (phydev);
}
+static int lan87xx_config_init(struct phy_device *phydev)
+{
+ /*
+ * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
+ * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
+ * to a bug on the chip.
+ *
+ * When the system is powered on with the network cable disconnected,
+ * and the cable stays disconnected until after 'ifconfig ethX up' is
+ * issued for the LAN port using this PHY, plugging the cable in
+ * afterwards does not trigger link change detection, while the
+ * expected behavior is that Link UP is detected.
+ */
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc &= ~MII_LAN83C185_EDPWRDOWN;
+
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
+ if (rc < 0)
+ return rc;
+
+ return smsc_phy_ack_interrupt(phydev);
+}
+
static int lan911x_config_init(struct phy_device *phydev)
{
return smsc_phy_ack_interrupt(phydev);
@@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan87xx_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index cbf7047decc0..20f31d0d1536 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)
po = pppox_sk(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 341b65dbbcd3..f8cd61f449a4 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -848,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
}
#endif
-static void __team_port_change_check(struct team_port *port, bool linkup);
+static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_port_add(struct team *team, struct net_device *port_dev)
{
@@ -948,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
__team_compute_features(team);
- __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+ __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
__team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -983,6 +983,8 @@ err_set_mtu:
return err;
}
+static void __team_port_change_port_removed(struct team_port *port);
+
static int team_port_del(struct team *team, struct net_device *port_dev)
{
struct net_device *dev = team->dev;
@@ -999,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
__team_option_inst_mark_removed_port(team, port);
__team_options_change_check(team);
__team_option_inst_del_port(team, port);
- port->removed = true;
- __team_port_change_check(port, false);
+ __team_port_change_port_removed(port);
team_port_disable(team, port);
list_del_rcu(&port->list);
netdev_rx_handler_unregister(port_dev);
@@ -1652,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
&team_nl_family, 0, TEAM_CMD_NOOP);
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -EMSGSIZE;
goto err_msg_put;
}
@@ -1847,8 +1848,8 @@ start_again:
hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
@@ -2067,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
TEAM_CMD_PORT_LIST_GET);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
@@ -2251,13 +2252,11 @@ static void __team_options_change_check(struct team *team)
}
/* rtnl lock is held */
-static void __team_port_change_check(struct team_port *port, bool linkup)
+
+static void __team_port_change_send(struct team_port *port, bool linkup)
{
int err;
- if (!port->removed && port->state.linkup == linkup)
- return;
-
port->changed = true;
port->state.linkup = linkup;
team_refresh_port_linkup(port);
@@ -2282,6 +2281,23 @@ send_event:
}
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+ if (port->state.linkup != linkup)
+ __team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_added(struct team_port *port, bool linkup)
+{
+ __team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_removed(struct team_port *port)
+{
+ port->removed = true;
+ __team_port_change_send(port, false);
+}
+
static void team_port_change_check(struct team_port *port, bool linkup)
{
struct team *team = port->team;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f5ab6e613ec8..376143e8a1aa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = {
.probe = usbnet_probe,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1e86ea2266d4..dbeebef562d5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
err_free_irq:
+ trans_pcie->irq_requested = false;
free_irq(trans_pcie->irq, trans);
error:
iwl_free_isr_ict(trans);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 6b9af989632b..18d74f29dcb2 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -382,31 +382,8 @@ static struct acpi_driver lis3lv02d_driver = {
},
.drv.pm = HP_ACCEL_PM,
};
-
-static int __init lis3lv02d_init_module(void)
-{
- int ret;
-
- if (acpi_disabled)
- return -ENODEV;
-
- ret = acpi_bus_register_driver(&lis3lv02d_driver);
- if (ret < 0)
- return ret;
-
- pr_info("driver loaded\n");
-
- return 0;
-}
-
-static void __exit lis3lv02d_exit_module(void)
-{
- acpi_bus_unregister_driver(&lis3lv02d_driver);
-}
+module_acpi_driver(lis3lv02d_driver);
MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
-
-module_init(lis3lv02d_init_module);
-module_exit(lis3lv02d_exit_module);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index dae7abe1d711..5ff4f2e314d2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -917,20 +917,8 @@ static struct acpi_driver ideapad_acpi_driver = {
.drv.pm = &ideapad_pm,
.owner = THIS_MODULE,
};
-
-static int __init ideapad_acpi_module_init(void)
-{
- return acpi_bus_register_driver(&ideapad_acpi_driver);
-}
-
-static void __exit ideapad_acpi_module_exit(void)
-{
- acpi_bus_unregister_driver(&ideapad_acpi_driver);
-}
+module_acpi_driver(ideapad_acpi_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("IdeaPad ACPI Extras");
MODULE_LICENSE("GPL");
-
-module_init(ideapad_acpi_module_init);
-module_exit(ideapad_acpi_module_exit);
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index d528daa0e81c..d727bfee89a6 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -186,27 +186,7 @@ static struct acpi_driver acpi_topstar_driver = {
.notify = acpi_topstar_notify,
},
};
-
-static int __init topstar_laptop_init(void)
-{
- int ret;
-
- ret = acpi_bus_register_driver(&acpi_topstar_driver);
- if (ret < 0)
- return ret;
-
- pr_info("ACPI extras driver loaded\n");
-
- return 0;
-}
-
-static void __exit topstar_laptop_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_topstar_driver);
-}
-
-module_init(topstar_laptop_init);
-module_exit(topstar_laptop_exit);
+module_acpi_driver(acpi_topstar_driver);
MODULE_AUTHOR("Herton Ronaldo Krzesinski");
MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver");
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5e5d6317d690..e95be0b74859 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -122,30 +122,10 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
return result;
}
-static int __init toshiba_bt_rfkill_init(void)
-{
- int result;
-
- result = acpi_bus_register_driver(&toshiba_bt_rfkill_driver);
- if (result < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error registering driver\n"));
- return result;
- }
-
- return 0;
-}
-
static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type)
{
/* clean up */
return 0;
}
-static void __exit toshiba_bt_rfkill_exit(void)
-{
- acpi_bus_unregister_driver(&toshiba_bt_rfkill_driver);
-}
-
-module_init(toshiba_bt_rfkill_init);
-module_exit(toshiba_bt_rfkill_exit);
+module_acpi_driver(toshiba_bt_rfkill_driver);
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 38ba39d7ca7d..16d340c3b852 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -170,16 +170,4 @@ static struct acpi_driver xo15_ebook_driver = {
},
.drv.pm = &ebook_switch_pm,
};
-
-static int __init xo15_ebook_init(void)
-{
- return acpi_bus_register_driver(&xo15_ebook_driver);
-}
-
-static void __exit xo15_ebook_exit(void)
-{
- acpi_bus_unregister_driver(&xo15_ebook_driver);
-}
-
-module_init(xo15_ebook_init);
-module_exit(xo15_ebook_exit);
+module_acpi_driver(xo15_ebook_driver);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 507a8e2b9a4c..26b5d4b18dd7 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -321,14 +321,9 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
{
struct acpi_device *acpi = to_acpi_device(dev);
struct pnp_dev *pnp = _pnp;
- struct device *physical_device;
-
- physical_device = acpi_get_physical_device(acpi->handle);
- if (physical_device)
- put_device(physical_device);
/* true means it matched */
- return !physical_device
+ return !acpi->physical_node_count
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index a3ac39b79192..0646bf6e7889 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -208,6 +208,8 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
break;
case PINMUX_TYPE_GPIO:
+ case PINMUX_TYPE_INPUT:
+ case PINMUX_TYPE_OUTPUT:
break;
default:
pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d95696584762..3440812b4a84 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
/* print devices for all busses */
list_for_each_entry(bus, &usb_bus_list, bus_list) {
/* recurse through all children of the root hub */
- if (!bus->root_hub)
+ if (!bus_to_hcd(bus)->rh_registered)
continue;
usb_lock_device(bus->root_hub);
ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index bc84106ac057..75ba2091f9b4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
if (retval) {
dev_err (parent_dev, "can't register root hub for %s, %d\n",
dev_name(&usb_dev->dev), retval);
- }
- mutex_unlock(&usb_bus_list_lock);
-
- if (retval == 0) {
+ } else {
spin_lock_irq (&hcd_root_hub_lock);
hcd->rh_registered = 1;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
}
+ mutex_unlock(&usb_bus_list_lock);
return retval;
}
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 8947b203d5a4..ce45f5535236 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -52,18 +52,19 @@ out:
static int usb_acpi_check_pld(struct usb_device *udev, acpi_handle handle)
{
acpi_status status;
- struct acpi_pld pld;
+ struct acpi_pld_info *pld;
status = acpi_get_physical_device_location(handle, &pld);
if (ACPI_FAILURE(status))
return -ENODEV;
- if (pld.user_visible)
+ if (pld->user_visible)
udev->removable = USB_DEVICE_REMOVABLE;
else
udev->removable = USB_DEVICE_FIXED;
+ ACPI_FREE(pld);
return 0;
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index aaa8d2bce217..0bf72f943b00 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
/* From the GPIO notifying the over-current situation, find
* out the corresponding port */
at91_for_each_port(port) {
- if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+ if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+ gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
gpio = pdata->overcurrent_pin[port];
break;
}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 211a4920b88a..d8dedc7d3910 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
schedule_work(&virqfd->inject);
}
- if (flags & POLLHUP)
- /* The eventfd is closing, detach from VFIO */
- virqfd_deactivate(virqfd);
+ if (flags & POLLHUP) {
+ unsigned long flags;
+ spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
+
+ /*
+ * The eventfd is closing; if the virqfd has not yet been
+ * queued for release (as determined by testing whether the
+ * vdev pointer to it is still valid), queue it now. As
+ * with kvm irqfds, we know we won't race against the virqfd
+ * going away because we hold wqh->lock to get here.
+ */
+ if (*(virqfd->pvirqfd) == virqfd) {
+ *(virqfd->pvirqfd) = NULL;
+ virqfd_deactivate(virqfd);
+ }
+
+ spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
+ }
return 0;
}
@@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file,
static void virqfd_shutdown(struct work_struct *work)
{
struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
- struct virqfd **pvirqfd = virqfd->pvirqfd;
u64 cnt;
eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
@@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work)
eventfd_ctx_put(virqfd->eventfd);
kfree(virqfd);
- *pvirqfd = NULL;
}
static void virqfd_inject(struct work_struct *work)
@@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
int ret = 0;
unsigned int events;
- if (*pvirqfd)
- return -EBUSY;
-
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd)
return -ENOMEM;
virqfd->pvirqfd = pvirqfd;
- *pvirqfd = virqfd;
virqfd->vdev = vdev;
virqfd->handler = handler;
virqfd->thread = thread;
@@ -154,6 +163,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
virqfd->eventfd = ctx;
/*
+ * virqfds can be released by closing the eventfd or directly
+ * through ioctl. These are both done through a workqueue, so
+ * we update the pointer to the virqfd under lock to avoid
+ * pushing multiple jobs to release the same virqfd.
+ */
+ spin_lock_irq(&vdev->irqlock);
+
+ if (*pvirqfd) {
+ spin_unlock_irq(&vdev->irqlock);
+ ret = -EBUSY;
+ goto fail;
+ }
+ *pvirqfd = virqfd;
+
+ spin_unlock_irq(&vdev->irqlock);
+
+ /*
* Install our own custom wake-up handling so we are notified via
* a callback whenever someone signals the underlying eventfd.
*/
@@ -187,19 +213,29 @@ fail:
fput(file);
kfree(virqfd);
- *pvirqfd = NULL;
return ret;
}
-static void virqfd_disable(struct virqfd *virqfd)
+static void virqfd_disable(struct vfio_pci_device *vdev,
+ struct virqfd **pvirqfd)
{
- if (!virqfd)
- return;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ if (*pvirqfd) {
+ virqfd_deactivate(*pvirqfd);
+ *pvirqfd = NULL;
+ }
- virqfd_deactivate(virqfd);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
- /* Block until we know all outstanding shutdown jobs have completed. */
+ /*
+ * Block until we know all outstanding shutdown jobs have completed.
+ * Even if we don't queue the job, flush the wq to be sure it's
+ * been released.
+ */
flush_workqueue(vfio_irqfd_cleanup_wq);
}
@@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
vfio_intx_set_signal(vdev, -1);
- virqfd_disable(vdev->ctx[0].unmask);
- virqfd_disable(vdev->ctx[0].mask);
+ virqfd_disable(vdev, &vdev->ctx[0].unmask);
+ virqfd_disable(vdev, &vdev->ctx[0].mask);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
@@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
for (i = 0; i < vdev->num_ctx; i++) {
- virqfd_disable(vdev->ctx[i].unmask);
- virqfd_disable(vdev->ctx[i].mask);
+ virqfd_disable(vdev, &vdev->ctx[i].unmask);
+ virqfd_disable(vdev, &vdev->ctx[i].mask);
}
if (msix) {
@@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
vfio_send_intx_eventfd, NULL,
&vdev->ctx[0].unmask, fd);
- virqfd_disable(vdev->ctx[0].unmask);
+ virqfd_disable(vdev, &vdev->ctx[0].unmask);
}
return 0;