Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 4
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 15
-rw-r--r--  drivers/acpi/acpica/acparser.h | 3
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 45
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 5
-rw-r--r--  drivers/acpi/acpica/hwpci.c | 9
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 13
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 3
-rw-r--r--  drivers/acpi/acpica/utfileio.c | 9
-rw-r--r--  drivers/acpi/acpica/uthex.c | 4
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 11
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/osl.c | 6
-rw-r--r--  drivers/acpi/property.c | 54
-rw-r--r--  drivers/acpi/resource.c | 160
-rw-r--r--  drivers/acpi/scan.c | 30
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 2
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bus/mvebu-mbus.c | 109
-rw-r--r--  drivers/dma/at_xdmac.c | 231
-rw-r--r--  drivers/dma/dmaengine.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 17
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/iommu/intel-iommu.c | 31
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 21
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 2
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 12
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 87
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/ntb/ntb_hw.c | 3
-rw-r--r--  drivers/pnp/system.c | 35
-rw-r--r--  drivers/soc/mediatek/Kconfig | 1
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c | 54
-rw-r--r--  drivers/ssb/driver_chipcommon_pmu.c | 6
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 1
60 files changed, 800 insertions, 455 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ab2cbb51c6aa..16da18597def 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -62,7 +62,7 @@ config ACPI_SLEEP
config ACPI_PROCFS_POWER
bool "Deprecated power /proc/acpi directories"
- depends on PROC_FS
+ depends on X86 && PROC_FS
help
For backwards compatibility, this option allows
deprecated power /proc/acpi/ directories to exist, even when
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 4169bb87a996..43685dd36c77 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -231,7 +231,9 @@ void acpi_db_open_debug_file(char *name);
acpi_status acpi_db_load_acpi_table(char *filename);
acpi_status
-acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table);
+acpi_db_get_table_from_file(char *filename,
+ struct acpi_table_header **table,
+ u8 must_be_aml_table);
/*
* dbhistry - debugger HISTORY command
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 87b27521fcac..ffdb956391f6 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -352,11 +352,21 @@ struct acpi_package_info3 {
u16 reserved;
};
+struct acpi_package_info4 {
+ u8 type;
+ u8 object_type1;
+ u8 count1;
+ u8 sub_object_types;
+ u8 pkg_count;
+ u16 reserved;
+};
+
union acpi_predefined_info {
struct acpi_name_info info;
struct acpi_package_info ret_info;
struct acpi_package_info2 ret_info2;
struct acpi_package_info3 ret_info3;
+ struct acpi_package_info4 ret_info4;
};
/* Reset to default packing */
@@ -1165,4 +1175,9 @@ struct ah_uuid {
char *string;
};
+struct ah_table {
+ char *signature;
+ char *description;
+};
+
#endif /* __ACLOCAL_H__ */
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 74a390c6db16..0cdd2fce493a 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -70,6 +70,9 @@
*
*****************************************************************************/
+extern const u8 acpi_gbl_short_op_index[];
+extern const u8 acpi_gbl_long_op_index[];
+
/*
* psxface - Parser external interfaces
*/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index a972d11c97c9..b9474b529fcb 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -105,6 +105,11 @@
* count = 0 (optional)
* (Used for _DLM)
*
+ * ACPI_PTYPE2_VAR_VAR: Variable number of subpackages, each of either a
+ * constant or variable length. The subpackages are preceded by a
+ * constant number of objects.
+ * (Used for _LPI, _RDI)
+ *
* ACPI_PTYPE2_UUID_PAIR: Each subpackage is preceded by a UUID Buffer. The UUID
* defines the format of the package. Zero-length parent package is
* allowed.
@@ -123,7 +128,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_MIN = 8,
ACPI_PTYPE2_REV_FIXED = 9,
ACPI_PTYPE2_FIX_VAR = 10,
- ACPI_PTYPE2_UUID_PAIR = 11
+ ACPI_PTYPE2_VAR_VAR = 11,
+ ACPI_PTYPE2_UUID_PAIR = 12
};
/* Support macros for users of the predefined info table */
@@ -172,7 +178,7 @@ enum acpi_return_package_types {
* These are the names that can actually be evaluated via acpi_evaluate_object.
* Not present in this table are the following:
*
- * 1) Predefined/Reserved names that are never evaluated via
+ * 1) Predefined/Reserved names that are not usually evaluated via
* acpi_evaluate_object:
* _Lxx and _Exx GPE methods
* _Qxx EC methods
@@ -361,6 +367,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+ {{"_BTH", METHOD_1ARGS(ACPI_TYPE_INTEGER), /* ACPI 6.0 */
+ METHOD_NO_RETURN_VALUE}},
+
{{"_BTM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
@@ -390,6 +399,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0,
0, 0, 0),
+ {{"_CR3", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
{{"_CRS", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
@@ -445,7 +457,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
METHOD_NO_RETURN_VALUE}},
- {{"_DSD", METHOD_0ARGS,
+ {{"_DSD", METHOD_0ARGS, /* ACPI 6.0 */
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Buf, 1 Pkg */
PACKAGE_INFO(ACPI_PTYPE2_UUID_PAIR, ACPI_RTYPE_BUFFER, 1,
ACPI_RTYPE_PACKAGE, 1, 0),
@@ -604,6 +616,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (2 Int) */
PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
+ {{"_LPI", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (3 Int, n Pkg (10 Int/Buf) */
+ PACKAGE_INFO(ACPI_PTYPE2_VAR_VAR, ACPI_RTYPE_INTEGER, 3,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER | ACPI_RTYPE_STRING,
+ 10, 0),
+
{{"_MAT", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
@@ -624,6 +642,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
ACPI_TYPE_INTEGER),
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_MTL", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
{{"_NTT", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
@@ -716,6 +737,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+ {{"_PRR", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Ref) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_REFERENCE, 1, 0, 0, 0),
+
{{"_PRS", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
@@ -796,6 +821,11 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_PXM", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_RDI", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int, n Pkg (m Ref)) */
+ PACKAGE_INFO(ACPI_PTYPE2_VAR_VAR, ACPI_RTYPE_INTEGER, 1,
+ ACPI_RTYPE_REFERENCE, 0, 0),
+
{{"_REG", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
METHOD_NO_RETURN_VALUE}},
@@ -808,6 +838,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_ROM", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+ {{"_RST", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_NO_RETURN_VALUE}},
+
{{"_RTV", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
@@ -935,6 +968,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_TDL", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_TFP", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
{{"_TIP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
@@ -959,6 +995,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int with count */
PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+ {{"_TSN", METHOD_0ARGS, /* ACPI 6.0 */
+ METHOD_RETURNS(ACPI_RTYPE_REFERENCE)}},
+
{{"_TSP", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2b3c5bd222f1..d49f5c7a20d9 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -251,7 +251,7 @@ extern const u8 _acpi_ctype[];
#define _ACPI_DI 0x04 /* '0'-'9' */
#define _ACPI_LO 0x02 /* 'a'-'z' */
#define _ACPI_PU 0x10 /* punctuation */
-#define _ACPI_SP 0x08 /* space */
+#define _ACPI_SP 0x08 /* space, tab, CR, LF, VT, FF */
#define _ACPI_UP 0x01 /* 'A'-'Z' */
#define _ACPI_XD 0x80 /* '0'-'9', 'A'-'F', 'a'-'f' */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d72565a3c646..85bb951430d9 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -116,6 +116,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
walk_state =
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
+ acpi_ps_free_op(op);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -125,6 +126,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
obj_desc->method.aml_length, NULL, 0);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
+ acpi_ps_free_op(op);
return_ACPI_STATUS(status);
}
@@ -133,9 +135,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
/* Parse the method, scan for creation of named objects */
status = acpi_ps_parse_aml(walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index c5214dec4988..f785ea788356 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -123,7 +123,7 @@ acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
acpi_handle root_pci_device, acpi_handle pci_region)
{
acpi_status status;
- struct acpi_pci_device *list_head = NULL;
+ struct acpi_pci_device *list_head;
ACPI_FUNCTION_TRACE(hw_derive_pci_id);
@@ -177,13 +177,13 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device,
acpi_handle parent_device;
acpi_status status;
struct acpi_pci_device *list_element;
- struct acpi_pci_device *list_head = NULL;
/*
* Ascend namespace branch until the root_pci_device is reached, building
* a list of device nodes. Loop will exit when either the PCI device is
* found, or the root of the namespace is reached.
*/
+ *return_list_head = NULL;
current_device = pci_region;
while (1) {
status = acpi_get_parent(current_device, &parent_device);
@@ -198,7 +198,6 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device,
/* Finished when we reach the PCI root device (PNP0A03 or PNP0A08) */
if (parent_device == root_pci_device) {
- *return_list_head = list_head;
return (AE_OK);
}
@@ -213,9 +212,9 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device,
/* Put new element at the head of the list */
- list_element->next = list_head;
+ list_element->next = *return_list_head;
list_element->device = parent_device;
- list_head = list_element;
+ *return_list_head = list_element;
current_device = parent_device;
}
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 8b79958b7aca..9bb251932b45 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -316,6 +316,13 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
acpi_ns_check_package_list(info, package, elements, count);
break;
+ case ACPI_PTYPE2_VAR_VAR:
+ /*
+ * Returns a variable list of packages, each with a variable list
+ * of objects.
+ */
+ break;
+
case ACPI_PTYPE2_UUID_PAIR:
/* The package must contain pairs of (UUID + type) */
@@ -487,6 +494,12 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
}
break;
+ case ACPI_PTYPE2_VAR_VAR:
+ /*
+ * Each subpackage has a fixed or variable number of elements
+ */
+ break;
+
case ACPI_PTYPE2_FIXED:
/* Each subpackage has a fixed length */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 151fcd95ba84..77d8103d0094 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -497,10 +497,10 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_REV_FIXED:
case ACPI_PTYPE2_FIX_VAR:
-
break;
default:
+ case ACPI_PTYPE2_VAR_VAR:
case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_OPTION:
return;
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 20e1a35169fc..58310907fa7b 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -50,9 +50,6 @@
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psopinfo")
-extern const u8 acpi_gbl_short_op_index[];
-extern const u8 acpi_gbl_long_op_index[];
-
static const u8 acpi_gbl_argument_count[] =
{ 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 7e1168be39fa..857af824337b 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -198,11 +198,8 @@ acpi_ut_read_table(FILE * fp,
table_header.length, file_size);
#ifdef ACPI_ASL_COMPILER
- status = fl_check_for_ascii(fp, NULL, FALSE);
- if (ACPI_SUCCESS(status)) {
- acpi_os_printf
- ("File appears to be ASCII only, must be binary\n");
- }
+ acpi_os_printf("File is corrupt or is ASCII text -- "
+ "it must be a binary file\n");
#endif
return (AE_BAD_HEADER);
}
@@ -315,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
/* Get the entire file */
fprintf(stderr,
- "Loading Acpi table from file %10s - Length %.8u (%06X)\n",
+ "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n",
filename, file_size, file_size);
status = acpi_ut_read_table(file, table, &table_length);
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index aa448278ba28..fda8b3def81c 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -75,9 +75,9 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_hex_char_to_value
+ * FUNCTION: acpi_ut_ascii_char_to_hex
*
- * PARAMETERS: ascii_char - Hex character in Ascii
+ * PARAMETERS: hex_char - Hex character in Ascii
*
* RETURN: The binary value of the ascii/hex character
*
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 306e785f9418..98d578753101 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -107,9 +107,16 @@ acpi_exception(const char *module_name,
va_list arg_list;
ACPI_MSG_REDIRECT_BEGIN;
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
- acpi_format_exception(status));
+ /* For AE_OK, just print the message */
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf(ACPI_MSG_EXCEPTION);
+
+ } else {
+ acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_format_exception(status));
+ }
va_start(arg_list, format);
acpi_os_vprintf(format, arg_list);
ACPI_MSG_SUFFIX;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ba4a61e964be..d93628c65661 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -191,6 +191,8 @@ bool acpi_osi_is_win8(void);
/*--------------------------------------------------------------------------
Device properties
-------------------------------------------------------------------------- */
+#define ACPI_DT_NAMESPACE_HID "PRP0001"
+
void acpi_init_properties(struct acpi_device *adev);
void acpi_free_properties(struct acpi_device *adev);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7ccba395c9dd..5226a8b921ae 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,11 +175,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
if (!addr || !length)
return;
- /* Resources are never freed */
- if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr, length, desc);
- else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr, length, desc);
+ acpi_reserve_region(addr, length, gas->space_id, 0, desc);
}
static void __init acpi_reserve_resources(void)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 0d083736e25b..7836e2e980f4 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -79,50 +79,51 @@ static bool acpi_properties_format_valid(const union acpi_object *properties)
static void acpi_init_of_compatible(struct acpi_device *adev)
{
const union acpi_object *of_compatible;
- struct acpi_hardware_id *hwid;
- bool acpi_of = false;
int ret;
- /*
- * Check if the special PRP0001 ACPI ID is present and in that
- * case we fill in Device Tree compatible properties for this
- * device.
- */
- list_for_each_entry(hwid, &adev->pnp.ids, list) {
- if (!strcmp(hwid->id, "PRP0001")) {
- acpi_of = true;
- break;
- }
- }
-
- if (!acpi_of)
- return;
-
ret = acpi_dev_get_property_array(adev, "compatible", ACPI_TYPE_STRING,
&of_compatible);
if (ret) {
ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
- acpi_handle_warn(adev->handle,
- "PRP0001 requires compatible property\n");
+ if (adev->parent
+ && adev->parent->flags.of_compatible_ok)
+ goto out;
+
return;
}
}
adev->data.of_compatible = of_compatible;
+
+ out:
+ adev->flags.of_compatible_ok = 1;
}
void acpi_init_properties(struct acpi_device *adev)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ bool acpi_of = false;
+ struct acpi_hardware_id *hwid;
const union acpi_object *desc;
acpi_status status;
int i;
+ /*
+ * Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
+ * Device Tree compatible properties for this device.
+ */
+ list_for_each_entry(hwid, &adev->pnp.ids, list) {
+ if (!strcmp(hwid->id, ACPI_DT_NAMESPACE_HID)) {
+ acpi_of = true;
+ break;
+ }
+ }
+
status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
- return;
+ goto out;
desc = buf.pointer;
if (desc->package.count % 2)
@@ -156,13 +157,20 @@ void acpi_init_properties(struct acpi_device *adev)
adev->data.pointer = buf.pointer;
adev->data.properties = properties;
- acpi_init_of_compatible(adev);
- return;
+ if (acpi_of)
+ acpi_init_of_compatible(adev);
+
+ goto out;
}
fail:
- dev_warn(&adev->dev, "Returned _DSD data is not valid, skipping\n");
+ dev_dbg(&adev->dev, "Returned _DSD data is not valid, skipping\n");
ACPI_FREE(buf.pointer);
+
+ out:
+ if (acpi_of && !adev->flags.of_compatible_ok)
+ acpi_handle_info(adev->handle,
+ ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
}
void acpi_free_properties(struct acpi_device *adev)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 8244f013f210..fcb7807ea8b7 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,6 +26,7 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
+#include <linux/list.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
@@ -621,3 +622,162 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+
+struct reserved_region {
+ struct list_head node;
+ u64 start;
+ u64 end;
+};
+
+static LIST_HEAD(reserved_io_regions);
+static LIST_HEAD(reserved_mem_regions);
+
+static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
+ char *desc)
+{
+ unsigned int length = end - start + 1;
+ struct resource *res;
+
+ res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
+ request_region(start, length, desc) :
+ request_mem_region(start, length, desc);
+ if (!res)
+ return -EIO;
+
+ res->flags &= ~flags;
+ return 0;
+}
+
+static int add_region_before(u64 start, u64 end, u8 space_id,
+ unsigned long flags, char *desc,
+ struct list_head *head)
+{
+ struct reserved_region *reg;
+ int error;
+
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ error = request_range(start, end, space_id, flags, desc);
+ if (error)
+ return error;
+
+ reg->start = start;
+ reg->end = end;
+ list_add_tail(&reg->node, head);
+ return 0;
+}
+
+/**
+ * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
+ * @start: Starting address of the region.
+ * @length: Length of the region.
+ * @space_id: Identifier of address space to reserve the region from.
+ * @flags: Resource flags to clear for the region after requesting it.
+ * @desc: Region description (for messages).
+ *
+ * Reserve an I/O or memory region as a system resource to prevent others from
+ * using it. If the new region overlaps with one of the regions (in the given
+ * address space) already reserved by this routine, only the non-overlapping
+ * parts of it will be reserved.
+ *
+ * Returned is either 0 (success) or a negative error code indicating a resource
+ * reservation problem. It is the code of the first encountered error, but the
+ * routine doesn't abort until it has attempted to request all of the parts of
+ * the new region that don't overlap with other regions reserved previously.
+ *
+ * The resources requested by this routine are never released.
+ */
+int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+ unsigned long flags, char *desc)
+{
+ struct list_head *regions;
+ struct reserved_region *reg;
+ u64 end = start + length - 1;
+ int ret = 0, error = 0;
+
+ if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ regions = &reserved_io_regions;
+ else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ regions = &reserved_mem_regions;
+ else
+ return -EINVAL;
+
+ if (list_empty(regions))
+ return add_region_before(start, end, space_id, flags, desc, regions);
+
+ list_for_each_entry(reg, regions, node)
+ if (reg->start == end + 1) {
+ /* The new region can be prepended to this one. */
+ ret = request_range(start, end, space_id, flags, desc);
+ if (!ret)
+ reg->start = start;
+
+ return ret;
+ } else if (reg->start > end) {
+ /* No overlap. Add the new region here and get out. */
+ return add_region_before(start, end, space_id, flags,
+ desc, &reg->node);
+ } else if (reg->end == start - 1) {
+ goto combine;
+ } else if (reg->end >= start) {
+ goto overlap;
+ }
+
+ /* The new region goes after the last existing one. */
+ return add_region_before(start, end, space_id, flags, desc, regions);
+
+ overlap:
+ /*
+ * The new region overlaps an existing one.
+ *
+ * The head part of the new region immediately preceding the existing
+ * overlapping one can be combined with it right away.
+ */
+ if (reg->start > start) {
+ error = request_range(start, reg->start - 1, space_id, flags, desc);
+ if (error)
+ ret = error;
+ else
+ reg->start = start;
+ }
+
+ combine:
+ /*
+ * The new region is adjacent to an existing one. If it extends beyond
+ * that region all the way to the next one, it is possible to combine
+ * all three of them.
+ */
+ while (reg->end < end) {
+ struct reserved_region *next = NULL;
+ u64 a = reg->end + 1, b = end;
+
+ if (!list_is_last(&reg->node, regions)) {
+ next = list_next_entry(reg, node);
+ if (next->start <= end)
+ b = next->start - 1;
+ }
+ error = request_range(a, b, space_id, flags, desc);
+ if (!error) {
+ if (next && next->start == b + 1) {
+ reg->end = next->end;
+ list_del(&next->node);
+ kfree(next);
+ } else {
+ reg->end = end;
+ break;
+ }
+ } else if (next) {
+ if (!ret)
+ ret = error;
+
+ reg = next;
+ } else {
+ break;
+ }
+ }
+
+ return ret ? ret : error;
+}
+EXPORT_SYMBOL_GPL(acpi_reserve_region);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 03141aa4ea95..0a099917a006 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -135,12 +135,13 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
struct acpi_hardware_id *id;
/*
- * Since we skip PRP0001 from the modalias below, 0 should be returned
- * if PRP0001 is the only ACPI/PNP ID in the device's list.
+ * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
+ * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
+ * device's list.
*/
count = 0;
list_for_each_entry(id, &acpi_dev->pnp.ids, list)
- if (strcmp(id->id, "PRP0001"))
+ if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
count++;
if (!count)
@@ -153,7 +154,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
size -= len;
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
- if (!strcmp(id->id, "PRP0001"))
+ if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
continue;
count = snprintf(&modalias[len], size, "%s:", id->id);
@@ -177,7 +178,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
* @size: Size of the buffer.
*
* Expose DT compatible modalias as of:NnameTCcompatible. This function should
- * only be called for devices having PRP0001 in their list of ACPI/PNP IDs.
+ * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
+ * ACPI/PNP IDs.
*/
static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
@@ -980,9 +982,9 @@ static void acpi_device_remove_files(struct acpi_device *dev)
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
*
- * If @dev has an ACPI companion which has the special PRP0001 device ID in its
- * list of identifiers and a _DSD object with the "compatible" property, use
- * that property to match against the given list of identifiers.
+ * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
+ * identifiers and a _DSD object with the "compatible" property, use that
+ * property to match against the given list of identifiers.
*/
static bool acpi_of_match_device(struct acpi_device *adev,
const struct of_device_id *of_match_table)
@@ -1038,14 +1040,14 @@ static const struct acpi_device_id *__acpi_match_device(
return id;
/*
- * Next, check the special "PRP0001" ID and try to match the
+ * Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
*
* The id returned by the below is not valid, but the only
* caller passing non-NULL of_ids here is only interested in
* whether or not the return value is NULL.
*/
- if (!strcmp("PRP0001", hwid->id)
+ if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
&& acpi_of_match_device(device, of_ids))
return id;
}
@@ -1671,7 +1673,7 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static void acpi_wakeup_gpe_init(struct acpi_device *device)
{
- struct acpi_device_id button_device_ids[] = {
+ static const struct acpi_device_id button_device_ids[] = {
{"PNP0C0C", 0},
{"PNP0C0D", 0},
{"PNP0C0E", 0},
@@ -2405,7 +2407,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
}
static const struct acpi_device_id generic_device_ids[] = {
- {"PRP0001", },
+ {ACPI_DT_NAMESPACE_HID, },
{"", },
};
@@ -2413,8 +2415,8 @@ static int acpi_generic_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
/*
- * Since PRP0001 is the only ID handled here, the test below can be
- * unconditional.
+ * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test
+ * below can be unconditional.
*/
if (adev->data.of_compatible)
acpi_default_enumeration(adev);
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 23716dd8a7ec..5928d0746a27 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
writel((cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->mmio + AHCI_WINDOW_CTRL(i));
- writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
writel(((cs->size - 1) & 0xffff0000),
hpriv->mmio + AHCI_WINDOW_SIZE(i));
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 80a80548ad0a..27245957eee3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index eb1fed5bd516..3ccef9eba6f9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
config BLK_DEV_PMEM
tristate "Persistent memory block device support"
+ depends on HAS_IOMEM
help
Saying Y here will allow you to use a contiguous range of reserved
memory as one or more persistent block devices.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8dcbced0eafd..6e134f4759c0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
zram->max_comp_streams = 1;
+
set_capacity(zram->disk, 0);
+ part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index fb9ec6221730..6f047dcb94c2 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -58,7 +58,6 @@
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/syscore_ops.h>
-#include <linux/memblock.h>
/*
* DDR target is the same on all platforms.
@@ -70,6 +69,7 @@
*/
#define WIN_CTRL_OFF 0x0000
#define WIN_CTRL_ENABLE BIT(0)
+/* Only on HW I/O coherency capable platforms */
#define WIN_CTRL_SYNCBARRIER BIT(1)
#define WIN_CTRL_TGT_MASK 0xf0
#define WIN_CTRL_TGT_SHIFT 4
@@ -102,9 +102,7 @@
/* Relative to mbusbridge_base */
#define MBUS_BRIDGE_CTRL_OFF 0x0
-#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
#define MBUS_BRIDGE_BASE_OFF 0x4
-#define MBUS_BRIDGE_BASE_MASK 0xffff0000
/* Maximum number of windows, for all known platforms */
#define MBUS_WINS_MAX 20
@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
(attr << WIN_CTRL_ATTR_SHIFT) |
(target << WIN_CTRL_TGT_SHIFT) |
- WIN_CTRL_SYNCBARRIER |
WIN_CTRL_ENABLE;
+ if (mbus->hw_io_coherency)
+ ctrl |= WIN_CTRL_SYNCBARRIER;
writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
writel(ctrl, addr + WIN_CTRL_OFF);
@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
return MVEBU_MBUS_NO_REMAP;
}
-/*
- * Use the memblock information to find the MBus bridge hole in the
- * physical address space.
- */
-static void __init
-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
-{
- struct memblock_region *r;
- uint64_t s = 0;
-
- for_each_memblock(memory, r) {
- /*
- * This part of the memory is above 4 GB, so we don't
- * care for the MBus bridge hole.
- */
- if (r->base >= 0x100000000)
- continue;
-
- /*
- * The MBus bridge hole is at the end of the RAM under
- * the 4 GB limit.
- */
- if (r->base + r->size > s)
- s = r->base + r->size;
- }
-
- *start = s;
- *end = 0x100000000;
-}
-
static void __init
mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
{
int i;
int cs;
- uint64_t mbus_bridge_base, mbus_bridge_end;
mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
- mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
-
for (i = 0, cs = 0; i < 4; i++) {
- u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
- u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
- u64 end;
- struct mbus_dram_window *w;
-
- /* Ignore entries that are not enabled */
- if (!(size & DDR_SIZE_ENABLED))
- continue;
-
- /*
- * Ignore entries whose base address is above 2^32,
- * since devices cannot DMA to such high addresses
- */
- if (base & DDR_BASE_CS_HIGH_MASK)
- continue;
-
- base = base & DDR_BASE_CS_LOW_MASK;
- size = (size | ~DDR_SIZE_MASK) + 1;
- end = base + size;
-
- /*
- * Adjust base/size of the current CS to make sure it
- * doesn't overlap with the MBus bridge hole. This is
- * particularly important for devices that do DMA from
- * DRAM to a SRAM mapped in a MBus window, such as the
- * CESA cryptographic engine.
- */
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
/*
- * The CS is fully enclosed inside the MBus bridge
- * area, so ignore it.
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only access the first
+ * 32 bits of the memory).
*/
- if (base >= mbus_bridge_base && end <= mbus_bridge_end)
- continue;
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
- /*
- * Beginning of CS overlaps with end of MBus, raise CS
- * base address, and shrink its size.
- */
- if (base >= mbus_bridge_base && end > mbus_bridge_end) {
- size -= mbus_bridge_end - base;
- base = mbus_bridge_end;
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (size | ~DDR_SIZE_MASK) + 1;
}
-
- /*
- * End of CS overlaps with beginning of MBus, shrink
- * CS size.
- */
- if (base < mbus_bridge_base && end > mbus_bridge_base)
- size -= end - mbus_bridge_base;
-
- w = &mvebu_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- if (mbus->hw_io_coherency)
- w->mbus_attr |= ATTR_HW_COHERENCY;
- w->base = base;
- w->size = size;
}
mvebu_mbus_dram_info.num_cs = cs;
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 933e4b338459..7992164ea9ec 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -174,6 +174,8 @@
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
#define AT_XDMAC_MAX_CHAN 0x20
+#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
+#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -192,20 +194,17 @@ struct at_xdmac_chan {
struct dma_chan chan;
void __iomem *ch_regs;
u32 mask; /* Channel Mask */
- u32 cfg[2]; /* Channel Configuration Register */
- #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
- #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
+ u32 cfg; /* Channel Configuration Register */
u8 perid; /* Peripheral ID */
u8 perif; /* Peripheral Interface */
u8 memif; /* Memory Interface */
- u32 per_src_addr;
- u32 per_dst_addr;
u32 save_cc;
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
unsigned long status;
struct tasklet_struct tasklet;
+ struct dma_slave_config sconfig;
spinlock_t lock;
@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_xdmac_desc *desc = txd_to_at_desc(tx);
struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long irqflags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, irqflags);
cookie = dma_cookie_assign(tx);
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
if (list_is_singular(&atchan->xfers_list))
at_xdmac_start_xfer(atchan, desc);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
return cookie;
}
@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
return chan;
}
+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+ enum dma_transfer_direction direction)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ int csize, dwidth;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ atchan->cfg =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_INCREMENTED_AM
+ | AT_XDMAC_CC_SAM_FIXED_AM
+ | AT_XDMAC_CC_DIF(atchan->memif)
+ | AT_XDMAC_CC_SIF(atchan->perif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_PER2MEM
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = ffs(atchan->sconfig.src_maxburst) - 1;
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+ if (dwidth < 0) {
+ dev_err(chan2dev(chan), "invalid src addr width value\n");
+ return -EINVAL;
+ }
+ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+ } else if (direction == DMA_MEM_TO_DEV) {
+ atchan->cfg =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_FIXED_AM
+ | AT_XDMAC_CC_SAM_INCREMENTED_AM
+ | AT_XDMAC_CC_DIF(atchan->perif)
+ | AT_XDMAC_CC_SIF(atchan->memif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_MEM2PER
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid dst maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+ if (dwidth < 0) {
+ dev_err(chan2dev(chan), "invalid dst addr width value\n");
+ return -EINVAL;
+ }
+ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+ }
+
+ dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+ return 0;
+}
+
+/*
+ * Only check that maxburst and addr width values are supported by the
+ * controller but not that the configuration is good to perform the
+ * transfer since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+ if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+ || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+ return -EINVAL;
+
+ if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+ || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+ return -EINVAL;
+
+ return 0;
+}
+
static int at_xdmac_set_slave_config(struct dma_chan *chan,
struct dma_slave_config *sconfig)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
- u8 dwidth;
- int csize;
- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
- AT91_XDMAC_DT_PERID(atchan->perid)
- | AT_XDMAC_CC_DAM_INCREMENTED_AM
- | AT_XDMAC_CC_SAM_FIXED_AM
- | AT_XDMAC_CC_DIF(atchan->memif)
- | AT_XDMAC_CC_SIF(atchan->perif)
- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
- | AT_XDMAC_CC_DSYNC_PER2MEM
- | AT_XDMAC_CC_MBSIZE_SIXTEEN
- | AT_XDMAC_CC_TYPE_PER_TRAN;
- csize = at_xdmac_csize(sconfig->src_maxburst);
- if (csize < 0) {
- dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ if (at_xdmac_check_slave_config(sconfig)) {
+ dev_err(chan2dev(chan), "invalid slave configuration\n");
return -EINVAL;
}
- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
- dwidth = ffs(sconfig->src_addr_width) - 1;
- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-
- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
- AT91_XDMAC_DT_PERID(atchan->perid)
- | AT_XDMAC_CC_DAM_FIXED_AM
- | AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(atchan->perif)
- | AT_XDMAC_CC_SIF(atchan->memif)
- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
- | AT_XDMAC_CC_DSYNC_MEM2PER
- | AT_XDMAC_CC_MBSIZE_SIXTEEN
- | AT_XDMAC_CC_TYPE_PER_TRAN;
- csize = at_xdmac_csize(sconfig->dst_maxburst);
- if (csize < 0) {
- dev_err(chan2dev(chan), "invalid src maxburst value\n");
- return -EINVAL;
- }
- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
- dwidth = ffs(sconfig->dst_addr_width) - 1;
- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
- /* Src and dst addr are needed to configure the link list descriptor. */
- atchan->per_src_addr = sconfig->src_addr;
- atchan->per_dst_addr = sconfig->dst_addr;
- dev_dbg(chan2dev(chan),
- "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
- __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
- atchan->per_src_addr, atchan->per_dst_addr);
+ memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
return 0;
}
@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
int i;
unsigned int xfer_size = 0;
+ unsigned long irqflags;
+ struct dma_async_tx_descriptor *ret = NULL;
if (!sgl)
return NULL;
@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
flags);
/* Protect dma_sconfig field that can be modified by set_slave_conf. */
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, irqflags);
+
+ if (at_xdmac_compute_chan_conf(chan, direction))
+ goto spin_unlock;
/* Prepare descriptors. */
for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (unlikely(!len)) {
dev_err(chan2dev(chan), "sg data length is zero\n");
- spin_unlock_bh(&atchan->lock);
- return NULL;
+ goto spin_unlock;
}
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
__func__, i, len, mem);
@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
list_splice_init(&first->descs_list, &atchan->free_descs_list);
- spin_unlock_bh(&atchan->lock);
- return NULL;
+ goto spin_unlock;
}
/* Linked list descriptor setup. */
if (direction == DMA_DEV_TO_MEM) {
- desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_sa = atchan->sconfig.src_addr;
desc->lld.mbr_da = mem;
- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
} else {
desc->lld.mbr_sa = mem;
- desc->lld.mbr_da = atchan->per_dst_addr;
- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
+ desc->lld.mbr_cfg = atchan->cfg;
dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
xfer_size += len;
}
- spin_unlock_bh(&atchan->lock);
first->tx_dma_desc.flags = flags;
first->xfer_size = xfer_size;
first->direction = direction;
+ ret = &first->tx_dma_desc;
- return &first->tx_dma_desc;
+spin_unlock:
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
+ return ret;
}
static struct dma_async_tx_descriptor *
@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
struct at_xdmac_desc *first = NULL, *prev = NULL;
unsigned int periods = buf_len / period_len;
int i;
+ unsigned long irqflags;
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
__func__, &buf_addr, buf_len, period_len,
@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
return NULL;
}
+ if (at_xdmac_compute_chan_conf(chan, direction))
+ return NULL;
+
for (i = 0; i < periods; i++) {
struct at_xdmac_desc *desc = NULL;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, irqflags);
desc = at_xdmac_get_desc(atchan);
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
list_splice_init(&first->descs_list, &atchan->free_descs_list);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
return NULL;
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
dev_dbg(chan2dev(chan),
"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
__func__, desc, &desc->tx_dma_desc.phys);
if (direction == DMA_DEV_TO_MEM) {
- desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_sa = atchan->sconfig.src_addr;
desc->lld.mbr_da = buf_addr + i * period_len;
- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
} else {
desc->lld.mbr_sa = buf_addr + i * period_len;
- desc->lld.mbr_da = atchan->per_dst_addr;
- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
+ desc->lld.mbr_cfg = atchan->cfg;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
| AT_XDMAC_MBR_UBC_NDEN
| AT_XDMAC_MBR_UBC_NSEN
@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
| AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
+ unsigned long irqflags;
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
__func__, &src, &dest, len, flags);
@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, irqflags);
desc = at_xdmac_get_desc(atchan);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
int residue;
u32 cur_nda, mask, value;
u8 dwidth = 0;
+ unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if (!txstate)
return ret;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
*/
if (!desc->active_xfer) {
dma_set_residue(txstate, desc->xfer_size);
- spin_unlock_bh(&atchan->lock);
- return ret;
+ goto spin_unlock;
}
residue = desc->xfer_size;
@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
- spin_unlock_bh(&atchan->lock);
-
dma_set_residue(txstate, residue);
dev_dbg(chan2dev(chan),
"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+spin_unlock:
+ spin_unlock_irqrestore(&atchan->lock, flags);
return ret;
}
@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
struct at_xdmac_desc *desc;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
/*
* If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
at_xdmac_start_xfer(atchan, desc);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
int ret;
+ unsigned long flags;
dev_dbg(chan2dev(chan), "%s\n", __func__);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
ret = at_xdmac_set_slave_config(chan, config);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return ret;
}
@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ unsigned long flags;
dev_dbg(chan2dev(chan), "%s\n", __func__);
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
return 0;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
cpu_relax();
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
}
@@ -1150,18 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ unsigned long flags;
dev_dbg(chan2dev(chan), "%s\n", __func__);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (!at_xdmac_chan_is_paused(atchan)) {
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
}
at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
}
@@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
struct at_xdmac_desc *desc, *_desc;
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ unsigned long flags;
dev_dbg(chan2dev(chan), "%s\n", __func__);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
cpu_relax();
@@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
at_xdmac_remove_xfer(atchan, desc);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
}
@@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *desc;
int i;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (at_xdmac_chan_is_enabled(atchan)) {
dev_err(chan2dev(chan),
@@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
spin_unlock:
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return i;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2890d744bb1b..3ddfd1f6c23c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->directions = device->directions;
caps->residue_granularity = device->residue_granularity;
- caps->cmd_pause = !!device->device_pause;
+ /*
+ * Some devices implement only pause (e.g. to get residuum) but no
+ * resume. However cmd_pause is advertised as pause AND resume.
+ */
+ caps->cmd_pause = !!(device->device_pause && device->device_resume);
caps->cmd_terminate = !!device->device_terminate_all;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 56e437e31580..ae628001fd97 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset;
+ int i = 0, inc, try = 0, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = dev_priv->gpio_mmio_base;
+retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
- for (i = 0; i < num; i++) {
+ for (; i < num; i += inc) {
+ inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- i += 1; /* set i to the index of the read xfer */
+ inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -525,6 +527,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ /*
+ * Passive adapters sometimes NAK the first probe. Retry the first
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+ * has retries internally. See also the retry loop in
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+ */
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
+ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
+ goto retry;
+ }
+
goto out;
timeout:
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e87d2f418de4..987b81f31b0e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising analog device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e597ffc26563..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- /* if there is no audio, set MINM_OVER_MAXP */
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
- (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
- drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b7ca4c514621..a7fdfa4f0857 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ /*
+ * Turks/Thames GPU will freeze the whole laptop if DPM is not restarted
+ * after the CP ring has chewed at least one packet. Hence here we stop
+ * and restart DPM after the radeon_ib_ring_tests().
+ */
+ if (rdev->pm.dpm_enabled &&
+ (rdev->pm.pm_method == PM_METHOD_DPM) &&
+ (rdev->family == CHIP_TURKS) &&
+ (rdev->flags & RADEON_IS_MOBILITY)) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_dpm_disable(rdev);
+ radeon_dpm_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index de42fc4a22b8..9c3377ca17b7 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* make sure object fit at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
} else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
}
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error_unreserve;
}
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_vm_clear_bo(rdev, pt);
if (r) {
radeon_bo_unref(&pt);
- radeon_bo_reserve(bo_va->bo, false);
return r;
}
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
return 0;
+
+error_unreserve:
+ radeon_bo_unreserve(bo_va->bo);
+ return r;
}
/**
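
The radeon_vm.c hunks above convert several early returns into a single
error_unreserve label so the buffer-object reservation taken by the caller is
always dropped on failure. A self-contained sketch of that goto-cleanup shape;
struct obj and obj_unreserve() are hypothetical stand-ins for the reserved BO.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int reserved; };

    static void obj_unreserve(struct obj *o) { o->reserved = 0; }

    /* Every failure path funnels through one label, so the reservation is
     * released exactly once no matter where the function bails out. */
    static int set_addr(struct obj *o, long soffset, long size)
    {
        void *tmp = NULL;
        int r = 0;

        if (soffset + size <= soffset) {        /* invalid range */
            r = -EINVAL;
            goto error_unreserve;
        }

        tmp = malloc(32);
        if (!tmp) {
            r = -ENOMEM;
            goto error_unreserve;
        }

        /* ... work with tmp ... */
        free(tmp);
        obj_unreserve(o);
        return 0;

    error_unreserve:
        obj_unreserve(o);
        return r;
    }

    int main(void)
    {
        struct obj o = { .reserved = 1 };
        int r = set_addr(&o, 0x1000, 0x2000);

        printf("set_addr: %d, still reserved: %d\n", r, o.reserved);
        return 0;
    }
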
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 630af73e98c4..35c8d0ceabee 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1024, 5112, 2024, 4832
},
{
+ (const char * const []){"LEN2000", NULL},
+ {ANY_BOARD_ID, ANY_BOARD_ID},
+ 1024, 5113, 2021, 4832
+ },
+ {
(const char * const []){"LEN2001", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
1024, 5022, 2508, 4832
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0045",
"LEN0047",
"LEN0049",
- "LEN2000",
+ "LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
"LEN2003",
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 68d43beccb7e..5ecfaf29933a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations (the ones with
+ * PASID support on bit 28) have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable supported super page\n");
intel_iommu_superpage = 0;
+ } else if (!strncmp(str, "ecs_off", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable extended context table support\n");
+ intel_iommu_ecs = 0;
}
str += strcspn(str, ",");
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
- if (ecap_ecs(iommu->ecap)) {
+ if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
@@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
return &context[devfn];
}
+static int iommu_dummy(struct device *dev)
+{
+ return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
u16 segment = 0;
int i;
+ if (iommu_dummy(dev))
+ return NULL;
+
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
segment = pci_domain_nr(pdev->bus);
@@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
if (context)
free_pgtable_page(context);
- if (!ecap_ecs(iommu->ecap))
+ if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
- if (ecap_ecs(iommu->ecap))
+ if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
return __get_valid_domain_for_dev(dev);
}
-static int iommu_dummy(struct device *dev)
-{
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
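
The intel-iommu change wraps the raw capability test in an ecs_enabled()
predicate that also requires PASID support and honours the new ecs_off option.
A small sketch of that capability-gating pattern, using made-up CAP_* bits
rather than the real ECAP register layout.

    #include <stdbool.h>
    #include <stdio.h>

    #define CAP_ECS    (1u << 0)        /* extended root/context tables  */
    #define CAP_PASID  (1u << 1)        /* PASID support (the "new" bit) */

    static int ecs_allowed = 1;         /* cleared by an "ecs_off"-style option */

    static bool ecs_enabled(unsigned int ecap)
    {
        /* Use extended tables only when PASID is also advertised, so the
         * problematic early implementations stay on legacy tables. */
        return ecs_allowed && (ecap & CAP_ECS) && (ecap & CAP_PASID);
    }

    int main(void)
    {
        printf("%d %d\n", ecs_enabled(CAP_ECS),
                          ecs_enabled(CAP_ECS | CAP_PASID));   /* 0 1 */
        return 0;
    }
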
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 57f09cb54464..269c2354c431 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void)
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
-static void gic_handle_shared_int(void)
+static void gic_handle_shared_int(bool chained)
{
unsigned int i, intr, virq;
unsigned long *pcpu_mask;
@@ -299,7 +299,10 @@ static void gic_handle_shared_int(void)
while (intr != gic_shared_intrs) {
virq = irq_linear_revmap(gic_irq_domain,
GIC_SHARED_TO_HWIRQ(intr));
- do_IRQ(virq);
+ if (chained)
+ generic_handle_irq(virq);
+ else
+ do_IRQ(virq);
/* go to next pending bit */
bitmap_clear(pending, intr, 1);
@@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = {
#endif
};
-static void gic_handle_local_int(void)
+static void gic_handle_local_int(bool chained)
{
unsigned long pending, masked;
unsigned int intr, virq;
@@ -445,7 +448,10 @@ static void gic_handle_local_int(void)
while (intr != GIC_NUM_LOCAL_INTRS) {
virq = irq_linear_revmap(gic_irq_domain,
GIC_LOCAL_TO_HWIRQ(intr));
- do_IRQ(virq);
+ if (chained)
+ generic_handle_irq(virq);
+ else
+ do_IRQ(virq);
/* go to next pending bit */
bitmap_clear(&pending, intr, 1);
@@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = {
static void __gic_irq_dispatch(void)
{
- gic_handle_local_int();
- gic_handle_shared_int();
+ gic_handle_local_int(false);
+ gic_handle_shared_int(false);
}
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
- __gic_irq_dispatch();
+ gic_handle_local_int(true);
+ gic_handle_shared_int(true);
}
#ifdef CONFIG_MIPS_GIC_IPI
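
The GIC change threads a "chained" flag down so that interrupts dispatched from
inside a parent handler go through generic_handle_irq() instead of do_IRQ(),
which would redo the low-level entry accounting. A toy illustration of selecting
the dispatch routine on that flag; both handlers here are printf stubs, not
kernel entry points.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for do_IRQ(): full interrupt-entry accounting. */
    static void handle_with_entry_accounting(int virq)
    {
        printf("do_IRQ-style dispatch of virq %d\n", virq);
    }

    /* Stand-in for generic_handle_irq(): called from within a parent handler. */
    static void handle_nested(int virq)
    {
        printf("nested dispatch of virq %d\n", virq);
    }

    static void dispatch_pending(const int *virqs, int n, bool chained)
    {
        for (int i = 0; i < n; i++) {
            if (chained)
                handle_nested(virqs[i]);
            else
                handle_with_entry_accounting(virqs[i]);
        }
    }

    int main(void)
    {
        int pending[] = { 3, 7 };

        dispatch_pending(pending, 2, false);    /* CPU exception vector path */
        dispatch_pending(pending, 2, true);     /* chained from a parent irq  */
        return 0;
    }
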
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 4a9ce5b50c5b..6b2b582433bd 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
irqd_set_trigger_type(data, flow_type);
irq_setup_alt_chip(data, flow_type);
- for (i = 0; i <= gc->num_ct; i++, ct++)
+ for (i = 0; i < gc->num_ct; i++, ct++)
if (ct->type & flow_type)
ctrl_off = ct->regs.type;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 27506302eb7a..4dbed4a67aaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
}
spin_unlock(&mddev->lock);
- return err;
+ return err ?: len;
}
err = mddev_lock(mddev);
if (err)
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- if (mddev_lock(mddev) == 0) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ mddev_lock(mddev) == 0) {
+ flush_workqueue(md_misc_wq);
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
- mddev_unlock(mddev);
}
+ mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e793ab6b3570..f55c3f35b746 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 553d54b87052..b6793d2e051f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index db84ddcfec84..9fd6c69a8bac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq)
- disable_irq(channel->dma_irq);
+ disable_irq_nosync(channel->dma_irq);
else
xgbe_disable_rx_tx_ints(pdata);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 77363d680532..a3b1c07ae0af 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2464,6 +2464,7 @@ err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
err_out_free_dev:
+ netif_napi_del(&bp->napi);
free_netdev(dev);
out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
+ netif_napi_del(&bp->napi);
free_netdev(dev);
ssb_pcihost_set_power_state(sdev, PCI_D3hot);
ssb_set_drvdata(sdev, NULL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e7651b3c6c57..420949cc55aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
phy_name = "external RGMII (no delay)";
else
phy_name = "external RGMII (TX delay)";
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
bcmgenet_sys_writel(priv,
PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
return -EINVAL;
}
+ /* This is an external PHY (xMII), so we need to enable the RGMII
+ * block for the interface to work
+ */
+ if (priv->ext_phy) {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 28d9ca675a27..68d47b196dae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
+ int err;
- enic_dev_fw_info(enic, &fw_info);
+ err = enic_dev_fw_info(enic, &fw_info);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_fw_info.
+ * For other failures, like devcmd failure, we return previously
+ * recorded info.
+ */
+ if (err == -ENOMEM)
+ return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
-
- enic_dev_stats_dump(enic, &vstats);
+ int err;
+
+ err = enic_dev_stats_dump(enic, &vstats);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+ * For other failures, like devcmd failure, we return previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return;
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
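
Both enic ethtool hunks bail out only when the underlying dump helper fails with
-ENOMEM (no DMA buffer at all); any other error falls through and the previously
recorded numbers are reported. A user-space sketch of that policy, with
query_stats() and report() as hypothetical stand-ins for the driver helpers.

    #include <errno.h>
    #include <stdio.h>

    struct stats { unsigned long tx_ok; };

    /* Pretend firmware query: only a missing buffer is a hard failure. */
    static int query_stats(struct stats *out, int simulate)
    {
        if (simulate == -ENOMEM)
            return -ENOMEM;             /* buffer allocation failed */
        if (simulate)
            return simulate;            /* devcmd failure: keep old data */
        out->tx_ok++;
        return 0;
    }

    static void report(struct stats *cached, int simulate)
    {
        int err = query_stats(cached, simulate);

        /* Only give up when there is no buffer; otherwise the previously
         * recorded numbers are still valid to report. */
        if (err == -ENOMEM)
            return;
        printf("tx_ok=%lu\n", cached->tx_ok);
    }

    int main(void)
    {
        struct stats s = { 0 };

        report(&s, 0);          /* fresh numbers             */
        report(&s, -EIO);       /* devcmd failure: cached    */
        report(&s, -ENOMEM);    /* no buffer: report nothing */
        return 0;
    }
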
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 204bd182473b..eadae1b412c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ int err;
- enic_dev_stats_dump(enic, &stats);
+ err = enic_dev_stats_dump(enic, &stats);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+ * For other failures, like devcmd failure, we return previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return net_stats;
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_poll_unlock_napi(&enic->rq[rq]);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[rq]);
return work_done;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 36a2ed606c91..c4b2183bf352 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
struct vnic_rq_buf *buf;
u32 fetch_index;
unsigned int count = rq->ring.desc_count;
+ int i;
buf = rq->to_clean;
- while (vnic_rq_desc_used(rq) > 0) {
-
+ for (i = 0; i < rq->ring.desc_count; i++) {
(*buf_clean)(rq, buf);
-
- buf = rq->to_clean = buf->next;
- rq->ring.desc_avail++;
+ buf = buf->next;
}
+ rq->ring.desc_avail = rq->ring.desc_count - 1;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
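
The vnic_rq_clean() change walks the ring for a fixed desc_count iterations
instead of looping on the used count, then recomputes desc_avail in one step.
Roughly the same bookkeeping on a toy circular buffer; clean_ring() and
RING_SIZE are illustrative only.

    #include <stdio.h>

    #define RING_SIZE 4

    struct buf {
        int id;
        struct buf *next;
    };

    /* Clean every descriptor in the ring, not just the ones currently marked
     * used; afterwards all but one slot are available again. */
    static int clean_ring(struct buf *to_clean, int desc_count)
    {
        struct buf *buf = to_clean;

        for (int i = 0; i < desc_count; i++) {
            printf("cleaning buf %d\n", buf->id);
            buf = buf->next;
        }
        return desc_count - 1;          /* new desc_avail */
    }

    int main(void)
    {
        struct buf ring[RING_SIZE];

        for (int i = 0; i < RING_SIZE; i++) {
            ring[i].id = i;
            ring[i].next = &ring[(i + 1) % RING_SIZE];
        }
        printf("desc_avail=%d\n", clean_ring(&ring[0], RING_SIZE));
        return 0;
    }
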
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fb140faeafb1..c5e1d0ac75f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure while reading FAT data\n");
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
- pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va, get_fat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
}
- memset(cmd.va, 0, cmd.size);
spin_lock_bh(&adapter->mcc_lock);
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
}
err:
spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
if (attribs_cmd.va)
- pci_free_consistent(adapter->pdev, attribs_cmd.size,
- attribs_cmd.va, attribs_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+ attribs_cmd.va, attribs_cmd.dma);
return status;
}
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
- get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
out:
spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
+ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va)
return -ENOMEM;
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
return status;
}
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
+ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
level = cfgs->module[0].trace_lvl[j].dbg_lvl;
}
}
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
err:
return level;
}
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
err:
mutex_unlock(&adapter->mbox_lock);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
res->vf_if_cap_flags = vf_res->cap_flags;
err:
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
status = be_cmd_notify_wait(adapter, &wrb);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
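
All of the be_cmds.c hunks follow one pattern: a zeroing allocator
(dma_zalloc_coherent with GFP_ATOMIC) replaces pci_alloc_consistent plus any
explicit memset, and the matching free becomes dma_free_coherent. A user-space
analogue of that conversion, with calloc() standing in for the coherent
allocator and struct dma_mem as a hypothetical command-buffer descriptor.

    #include <stdlib.h>

    struct dma_mem { void *va; size_t size; };

    /* One zeroing allocation replaces allocate-then-memset. */
    static int cmd_buf_alloc(struct dma_mem *cmd, size_t size)
    {
        cmd->size = size;
        cmd->va = calloc(1, size);      /* zeroed memory or NULL */
        return cmd->va ? 0 : -1;
    }

    static void cmd_buf_free(struct dma_mem *cmd)
    {
        free(cmd->va);
        cmd->va = NULL;
    }

    int main(void)
    {
        struct dma_mem cmd;

        if (!cmd_buf_alloc(&cmd, 64))
            cmd_buf_free(&cmd);
        return 0;
    }
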
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b765c24625bf..2835dee5dc39 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
- read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
break;
}
}
- pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
return status;
}
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
- &ddrdma_cmd.dma, GFP_KERNEL);
+ ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
if (!ddrdma_cmd.va)
return -ENOMEM;
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
- &eeprom_cmd.dma, GFP_KERNEL);
+ eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
if (!eeprom_cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f9ffb9026cd..e43cc8a73ea7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
}
flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
+ mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va)
return -ENOMEM;
mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
- memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3b7420..5d47307121ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -317,6 +317,7 @@ struct i40e_pf {
#endif
#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 34170eabca7d..da0faf478af0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ /* By default we are in VEPA mode; if this is the first VF/VMDq
+ * VSI to be added, switch to VEB mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf,
+ BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ }
+
vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
if (vsi)
dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c14491e3b..5b5bea159bd5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
} else if (mode != veb->bridge_mode) {
/* Existing HW bridge but different mode needs reset */
veb->bridge_mode = mode;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+ if (mode == BRIDGE_MODE_VEB)
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
break;
}
}
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ (i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |=
- cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id =
- cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__);
return NULL;
}
+ /* We come up by default in VEPA mode unless SRIOV is already
+ * enabled, in which case we can't force VEPA mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ }
i40e_config_bridge_mode(veb);
}
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
+#ifdef CONFIG_PCI_IOV
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ if (pci_num_vf(pdev))
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ }
+#endif
err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
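
Several i40e hunks gate VEB bridge mode behind a single PF flag: the device
comes up in VEPA mode and only flips to VEB (with a PF reset) when the first VF
or VMDq VSI appears or the administrator asks for it, and flips back when the
VFs are freed. A compact model of that state machine; struct pf, reset_pf() and
the helpers below are illustrative, not the driver's types.

    #include <stdbool.h>
    #include <stdio.h>

    enum bridge_mode { BRIDGE_MODE_VEPA, BRIDGE_MODE_VEB };

    struct pf {
        bool veb_mode_enabled;          /* mirrors I40E_FLAG_VEB_MODE_ENABLED */
        enum bridge_mode bridge_mode;
    };

    /* A mode change requires rebuilding the switch, hence a reset. */
    static void reset_pf(struct pf *pf)
    {
        pf->bridge_mode = pf->veb_mode_enabled ? BRIDGE_MODE_VEB
                                               : BRIDGE_MODE_VEPA;
        printf("reset, now in %s mode\n",
               pf->bridge_mode == BRIDGE_MODE_VEB ? "VEB" : "VEPA");
    }

    /* First VF/VMDq consumer switches out of the default VEPA mode. */
    static void enable_vfs(struct pf *pf)
    {
        if (!pf->veb_mode_enabled) {
            pf->veb_mode_enabled = true;
            reset_pf(pf);
        }
    }

    static void free_vfs(struct pf *pf)
    {
        pf->veb_mode_enabled = false;
        reset_pf(pf);
    }

    int main(void)
    {
        struct pf pf = { false, BRIDGE_MODE_VEPA };

        enable_vfs(&pf);        /* VEPA -> VEB, one reset      */
        enable_vfs(&pf);        /* already VEB, no extra reset */
        free_vfs(&pf);          /* back to VEPA                */
        return 0;
    }
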
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4bd3a80aba82..9d95042d5a0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @tx_flags: collected send information
- * @hdr_len: size of the packet header
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
- const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
struct skb_frag_struct *frag;
bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 1;
+ u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done;
}
frag = &skb_shinfo(skb)->frags[0];
- size = hdr_len;
/* we might still have more fragments per segment */
do {
size += skb_frag_size(frag);
frag++; j++;
+ if ((size >= skb_shinfo(skb)->gso_size) &&
+ (j < I40E_MAX_BUFFER_TXD)) {
+ size = (size % skb_shinfo(skb)->gso_size);
+ j = (size) ? 1 : 0;
+ }
if (j == I40E_MAX_BUFFER_TXD) {
- if (size < skb_shinfo(skb)->gso_size) {
- linearize = true;
- break;
- }
- j = 1;
- size -= skb_shinfo(skb)->gso_size;
- if (size)
- j++;
- size += hdr_len;
+ linearize = true;
+ break;
}
num_frags--;
} while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb))
goto out_drop;
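
The reworked i40e_chk_linearize() flags a packet for linearization when 8
consecutive fragments fail to complete a gso_size worth of data, resetting its
counter whenever a segment boundary is crossed. A standalone version of that
check over a plain array of fragment sizes; it omits the driver's TSO/FSO and
gso_segs pre-checks, and MAX_BUFFER_TXD is defined locally for the sketch.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BUFFER_TXD 8            /* hardware limit: 8 buffers per packet */

    static bool needs_linearize(const unsigned int *frag_size, int num_frags,
                                unsigned int gso_size)
    {
        unsigned int size = 0;
        int j = 0;

        if (num_frags < MAX_BUFFER_TXD)
            return false;

        for (int i = 0; i < num_frags; i++) {
            size += frag_size[i];
            j++;
            /* A segment completed before hitting the buffer limit: carry the
             * leftover bytes into the next segment's count. */
            if (size >= gso_size && j < MAX_BUFFER_TXD) {
                size %= gso_size;
                j = size ? 1 : 0;
            }
            if (j == MAX_BUFFER_TXD)
                return true;
        }
        return false;
    }

    int main(void)
    {
        unsigned int tiny[10] = { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 };
        unsigned int big[10]  = { 2048, 2048, 2048, 2048, 2048,
                                  2048, 2048, 2048, 2048, 2048 };

        printf("%d %d\n", needs_linearize(tiny, 10, 1448),
                          needs_linearize(big, 10, 1448));     /* 1 0 */
        return 0;
    }

With the tiny fragments the counter reaches MAX_BUFFER_TXD before a segment
completes, so the packet would be linearized; the large fragments never get
there.
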
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4ff565e..4e9376da0518 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- if (num_vfs)
+ if (num_vfs) {
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf,
+ BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ }
return i40e_pci_sriov_enable(pdev, num_vfs);
+ }
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b077e02a0cc7..458fbb421090 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @tx_flags: collected send information
- * @hdr_len: size of the packet header
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
- const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
struct skb_frag_struct *frag;
bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 1;
+ u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done;
}
frag = &skb_shinfo(skb)->frags[0];
- size = hdr_len;
/* we might still have more fragments per segment */
do {
size += skb_frag_size(frag);
frag++; j++;
+ if ((size >= skb_shinfo(skb)->gso_size) &&
+ (j < I40E_MAX_BUFFER_TXD)) {
+ size = (size % skb_shinfo(skb)->gso_size);
+ j = (size) ? 1 : 0;
+ }
if (j == I40E_MAX_BUFFER_TXD) {
- if (size < skb_shinfo(skb)->gso_size) {
- linearize = true;
- break;
- }
- j = 1;
- size -= skb_shinfo(skb)->gso_size;
- if (size)
- j++;
- size += hdr_len;
+ linearize = true;
+ break;
}
num_frags--;
} while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
- if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb))
goto out_drop;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e3b9b63ad010..c3a9392cbc19 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
- wr32(trgttiml, rq->perout.start.sec);
- wr32(trgttimh, rq->perout.start.nsec);
+ wr32(trgttimh, rq->perout.start.sec);
+ wr32(trgttiml, rq->perout.start.nsec);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
} else {
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index cd29b1038c5e..15f9b7c9e4d3 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev)
u32 ppd;
ndev->hw_type = BWD_HW;
+ ndev->limits.max_mw = BWD_MAX_MW;
rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
if (rc)
@@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
MW_TO_BAR(i));
rc = -EIO;
- goto err3;
+ goto err4;
}
}
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 49c1720df59a..515f33882ab8 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,6 +7,7 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
+#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -22,25 +23,41 @@ static const struct pnp_device_id pnp_dev_table[] = {
{"", 0}
};
+#ifdef CONFIG_ACPI
+static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+{
+ u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
+}
+#else
+static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+{
+ struct resource *res;
+
+ res = io ? request_region(start, length, desc) :
+ request_mem_region(start, length, desc);
+ if (res) {
+ res->flags &= ~IORESOURCE_BUSY;
+ return true;
+ }
+ return false;
+}
+#endif
+
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
- struct resource *res;
+ bool reserved;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
- if (port)
- res = request_region(start, end - start + 1, regionid);
- else
- res = request_mem_region(start, end - start + 1, regionid);
- if (res)
- res->flags &= ~IORESOURCE_BUSY;
- else
+ reserved = __reserve_range(start, end - start + 1, !!port, regionid);
+ if (!reserved)
kfree(regionid);
/*
@@ -49,7 +66,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
* have double reservations.
*/
dev_info(&dev->dev, "%pR %s reserved\n", r,
- res ? "has been" : "could not be");
+ reserved ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
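
The pnp/system.c change funnels both the ACPI path and the
request_region()/request_mem_region() path through one bool-returning helper,
so the caller only decides what to log and whether to keep the region id
string. A sketch of that wrapper shape with stubbed-out claim functions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the I/O-port and memory reservation calls. */
    static bool claim_io(unsigned long start, unsigned int len)
    {
        (void)start;
        return len != 0;
    }

    static bool claim_mem(unsigned long start, unsigned int len)
    {
        (void)start;
        return len != 0;
    }

    static bool reserve_range(unsigned long start, unsigned int len, bool io,
                              const char *desc)
    {
        bool ok = io ? claim_io(start, len) : claim_mem(start, len);

        printf("%s %#lx-%#lx %s reserved\n", desc, start, start + len - 1,
               ok ? "has been" : "could not be");
        return ok;
    }

    int main(void)
    {
        reserve_range(0x60, 1, true, "pnp PNP0303");
        return 0;
    }
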
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index bcdb22d5e215..3c1850332a90 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -4,6 +4,7 @@
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on ARCH_MEDIATEK
+ depends on RESET_CONTROLLER
select REGMAP
help
Say yes here to add support for MediaTek PMIC Wrapper found
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index db5be1eec54c..f432291feee9 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
int ret;
- u32 val;
-
- val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
- if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret)
@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
int ret;
- u32 val;
-
- val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
- if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret)
@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
*rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
return 0;
}
@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
{
- unsigned long rate_spi;
- int ck_mhz;
-
- rate_spi = clk_get_rate(wrp->clk_spi);
-
- if (rate_spi > 26000000)
- ck_mhz = 26;
- else if (rate_spi > 18000000)
- ck_mhz = 18;
- else
- ck_mhz = 0;
-
- switch (ck_mhz) {
- case 18:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
- break;
- case 26:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ if (pwrap_is_mt8135(wrp)) {
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
- break;
- case 0:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
- break;
- default:
- return -EINVAL;
+ } else {
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
}
return 0;
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 09428412139e..c5352ea4821e 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc)
u32 crystalfreq;
const struct pmu0_plltab_entry *e = NULL;
- crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
- SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
+ crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
+ SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
e = pmu0_plltab_find_entry(crystalfreq);
BUG_ON(!e);
return e->freq * 1000;
@@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
switch (bus->chip_id) {
case 0x5354:
- ssb_pmu_get_alp_clock_clk0(cc);
+ return ssb_pmu_get_alp_clock_clk0(cc);
default:
ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
bus->chip_id);
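
The ssb fix is pure operator precedence: '>>' binds tighter than '&', so the
unparenthesised expression masked with an already-shifted mask and never
shifted the register value. A short demonstration with a made-up field layout.

    #include <stdio.h>

    #define FIELD_MASK  0x0000007cu     /* bits 6:2 */
    #define FIELD_SHIFT 2

    int main(void)
    {
        unsigned int reg = 0x34;        /* field value 13 in bits 6:2 */

        unsigned int wrong = reg & FIELD_MASK >> FIELD_SHIFT;   /* = reg & 0x1f */
        unsigned int right = (reg & FIELD_MASK) >> FIELD_SHIFT;

        printf("wrong=%u right=%u\n", wrong, right);            /* 20 vs 13 */
        return 0;
    }

The same hunk also adds the missing return in ssb_pmu_get_alp_clock(), which
previously fell through to the error path for chip 0x5354.
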
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index e894eb278d83..eba1b7ac7294 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
+ cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
irq_set_affinity_hint(irq, mask);
}
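
The virtio fix clears the cpumask before setting the requested CPU, so the
affinity hint names exactly one CPU instead of accumulating every CPU the
queue was ever pinned to. The same idea with a plain 64-bit mask.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mask;

    static void set_affinity(int cpu)
    {
        mask = 0;                       /* the previously missing clear */
        mask |= UINT64_C(1) << cpu;     /* set exactly one bit          */
    }

    int main(void)
    {
        set_affinity(2);
        set_affinity(5);
        printf("mask=%#llx\n", (unsigned long long)mask);   /* 0x20, not 0x24 */
        return 0;
    }
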