Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd_genhd.c      |  20
-rw-r--r--  drivers/s390/block/dasd_ioctl.c      |  76
-rw-r--r--  drivers/s390/cio/Makefile            |   2
-rw-r--r--  drivers/s390/cio/chsc.c              |  40
-rw-r--r--  drivers/s390/cio/chsc.h              |  50
-rw-r--r--  drivers/s390/cio/device_ops.c        |  23
-rw-r--r--  drivers/s390/cio/idset.c             |  12
-rw-r--r--  drivers/s390/cio/qdio.h              |  16
-rw-r--r--  drivers/s390/cio/qdio_main.c         | 299
-rw-r--r--  drivers/s390/cio/qdio_setup.c        | 100
-rw-r--r--  drivers/s390/cio/qdio_thinint.c      |  61
-rw-r--r--  drivers/s390/cio/vfio_ccw_chp.c      | 148
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c       |  19
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c      | 165
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c      |  65
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h  |  16
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.c    |   1
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.h    |  30
-rw-r--r--  drivers/s390/crypto/ap_bus.c         |  94
-rw-r--r--  drivers/s390/crypto/ap_bus.h         |  25
-rw-r--r--  drivers/s390/crypto/ap_card.c        |  47
-rw-r--r--  drivers/s390/crypto/ap_queue.c       |  10
-rw-r--r--  drivers/s390/net/Kconfig             |   9
-rw-r--r--  drivers/s390/net/ctcm_main.c         |  40
-rw-r--r--  drivers/s390/net/ism_drv.c           |   4
-rw-r--r--  drivers/s390/net/lcs.c               |  59
-rw-r--r--  drivers/s390/net/netiucv.c           | 104
-rw-r--r--  drivers/s390/net/qeth_core.h         |  49
-rw-r--r--  drivers/s390/net/qeth_core_main.c    | 506
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h     |  25
-rw-r--r--  drivers/s390/net/qeth_core_sys.c     |  15
-rw-r--r--  drivers/s390/net/qeth_l2_main.c      | 200
-rw-r--r--  drivers/s390/net/qeth_l3_main.c      |  19
-rw-r--r--  drivers/s390/net/smsgiucv.c          |  65
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c         |   5
-rw-r--r--  drivers/s390/scsi/zfcp_diag.h        |   6
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c         |  84
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h         |  11
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c         |  76
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c        |  19
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c        | 131
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c       |  16
42 files changed, 1651 insertions(+), 1111 deletions(-)
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 7d079154f849..af5b0ecb8f89 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -143,9 +143,6 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- /* The two structs have 168/176 byte on 31/64 bit. */
- struct blkpg_partition bpart;
- struct blkpg_ioctl_arg barg;
struct block_device *bdev;
/*
@@ -155,19 +152,10 @@ void dasd_destroy_partitions(struct dasd_block *block)
bdev = block->bdev;
block->bdev = NULL;
- /*
- * See fs/partition/check.c:delete_partition
- * Can't call delete_partitions directly. Use ioctl.
- * The ioctl also does locking and invalidation.
- */
- memset(&bpart, 0, sizeof(struct blkpg_partition));
- memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
- barg.data = (void __force __user *) &bpart;
- barg.op = BLKPG_DEL_PARTITION;
- for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
- ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
-
- invalidate_partition(block->gdp, 0);
+ mutex_lock(&bdev->bd_mutex);
+ blk_drop_partitions(bdev);
+ mutex_unlock(&bdev->bd_mutex);
+
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
set_capacity(block->gdp, 0);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9a5f3add325f..777734d1b4e5 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -22,6 +22,7 @@
#include <asm/schid.h>
#include <asm/cmb.h>
#include <linux/uaccess.h>
+#include <linux/dasd_mod.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
@@ -457,10 +458,9 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
-static int dasd_ioctl_information(struct dasd_block *block,
- unsigned int cmd, void __user *argp)
+static int __dasd_ioctl_information(struct dasd_block *block,
+ struct dasd_information2_t *dasd_info)
{
- struct dasd_information2_t *dasd_info;
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
@@ -473,15 +473,9 @@ static int dasd_ioctl_information(struct dasd_block *block,
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
- dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
- if (dasd_info == NULL)
- return -ENOMEM;
-
rc = base->discipline->fill_info(base, dasd_info);
- if (rc) {
- kfree(dasd_info);
+ if (rc)
return rc;
- }
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
@@ -520,15 +514,24 @@ static int dasd_ioctl_information(struct dasd_block *block,
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(&block->queue_lock, flags);
+ return 0;
+}
- rc = 0;
- if (copy_to_user(argp, dasd_info,
- ((cmd == (unsigned int) BIODASDINFO2) ?
- sizeof(struct dasd_information2_t) :
- sizeof(struct dasd_information_t))))
- rc = -EFAULT;
+static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
+ size_t copy_size)
+{
+ struct dasd_information2_t *dasd_info;
+ int error;
+
+ dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
+ if (!dasd_info)
+ return -ENOMEM;
+
+ error = __dasd_ioctl_information(block, dasd_info);
+ if (!error && copy_to_user(argp, dasd_info, copy_size))
+ error = -EFAULT;
kfree(dasd_info);
- return rc;
+ return error;
}
/*
@@ -622,10 +625,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information_t));
break;
case BIODASDINFO2:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information2_t));
break;
case BIODASDPRRD:
rc = dasd_ioctl_read_profile(block, argp);
@@ -660,3 +665,36 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
dasd_put_device(base);
return rc;
}
+
+
+/**
+ * dasd_biodasdinfo() - fill out the dasd information structure
+ * @disk [in]: pointer to gendisk structure that references a DASD
+ * @info [out]: pointer to the dasd_information2_t structure
+ *
+ * Provide access to DASD specific information.
+ * The gendisk structure is checked if it belongs to the DASD driver by
+ * comparing the gendisk->fops pointer.
+ * If it does not belong to the DASD driver -EINVAL is returned.
+ * Otherwise the provided dasd_information2_t structure is filled out.
+ *
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int dasd_biodasdinfo(struct gendisk *disk, struct dasd_information2_t *info)
+{
+ struct dasd_device *base;
+ int error;
+
+ if (disk->fops != &dasd_device_operations)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(disk);
+ if (!base)
+ return -ENODEV;
+ error = __dasd_ioctl_information(base->block, info);
+ dasd_put_device(base);
+ return error;
+}
+/* export so that symbol_get in partition detection is possible */
+EXPORT_SYMBOL_GPL(dasd_biodasdinfo);
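The comment above points at the intended consumer: a partition parser that must not hard-depend on the DASD driver. Below is a minimal sketch of that pattern, assuming the linux/dasd_mod.h header added by this series declares the prototype; the wrapper name is illustrative.

#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/dasd_mod.h>

/*
 * Resolve the export at runtime via symbol_get() so the caller builds
 * and runs even when the DASD driver is not loaded (hypothetical helper).
 */
static int example_get_dasd_info(struct gendisk *disk,
				 struct dasd_information2_t *info)
{
	typeof(dasd_biodasdinfo) *fn;
	int rc = -EOPNOTSUPP;

	fn = symbol_get(dasd_biodasdinfo);
	if (fn) {
		rc = fn(disk, info);
		symbol_put(dasd_biodasdinfo);
	}
	return rc;
}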
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 23eae4188876..a9235f111e79 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
- vfio_ccw_async.o vfio_ccw_trace.o
+ vfio_ccw_async.o vfio_ccw_trace.o vfio_ccw_chp.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ca73c2e5a8f..c314e9495c1b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -57,6 +57,7 @@ int chsc_error_from_response(int response)
case 0x0104:
return -EINVAL;
case 0x0004:
+ case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */
return -EOPNOTSUPP;
case 0x000b:
case 0x0107: /* "Channel busy" for the op 0x003d */
@@ -1336,36 +1337,35 @@ out:
EXPORT_SYMBOL_GPL(chsc_scm_info);
/**
- * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
- * @brinfo_area: request and response block for the operation
+ * @pnso_area: request and response block for the operation
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
- * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
*
* Returns 0 on success.
*/
-int chsc_pnso_brinfo(struct subchannel_id schid,
- struct chsc_pnso_area *brinfo_area,
- struct chsc_brinfo_resume_token resume_token,
- int cnc)
+int chsc_pnso(struct subchannel_id schid,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc)
{
- memset(brinfo_area, 0, sizeof(*brinfo_area));
- brinfo_area->request.length = 0x0030;
- brinfo_area->request.code = 0x003d; /* network-subchannel operation */
- brinfo_area->m = schid.m;
- brinfo_area->ssid = schid.ssid;
- brinfo_area->sch = schid.sch_no;
- brinfo_area->cssid = schid.cssid;
- brinfo_area->oc = 0; /* Store-network-bridging-information list */
- brinfo_area->resume_token = resume_token;
- brinfo_area->n = (cnc != 0);
- if (chsc(brinfo_area))
+ memset(pnso_area, 0, sizeof(*pnso_area));
+ pnso_area->request.length = 0x0030;
+ pnso_area->request.code = 0x003d; /* network-subchannel operation */
+ pnso_area->m = schid.m;
+ pnso_area->ssid = schid.ssid;
+ pnso_area->sch = schid.sch_no;
+ pnso_area->cssid = schid.cssid;
+ pnso_area->oc = 0; /* Store-network-bridging-information list */
+ pnso_area->resume_token = resume_token;
+ pnso_area->n = (cnc != 0);
+ if (chsc(pnso_area))
return -EIO;
- return chsc_error_from_response(brinfo_area->response.code);
+ return chsc_error_from_response(pnso_area->response.code);
}
-EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
int chsc_sgib(u32 origin)
{
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 34de6d77442c..7ecf7e4c402e 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -205,52 +205,10 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
-struct chsc_brinfo_resume_token {
- u64 t1;
- u64 t2;
-} __packed;
-
-struct chsc_brinfo_naihdr {
- struct chsc_brinfo_resume_token resume_token;
- u32:32;
- u32 instance;
- u32:24;
- u8 naids;
- u32 reserved[3];
-} __packed;
-
-struct chsc_pnso_area {
- struct chsc_header request;
- u8:2;
- u8 m:1;
- u8:5;
- u8:2;
- u8 ssid:2;
- u8 fmt:4;
- u16 sch;
- u8:8;
- u8 cssid;
- u16:16;
- u8 oc;
- u32:24;
- struct chsc_brinfo_resume_token resume_token;
- u32 n:1;
- u32:31;
- u32 reserved[3];
- struct chsc_header response;
- u32:32;
- struct chsc_brinfo_naihdr naihdr;
- union {
- struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
- struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
- struct qdio_brinfo_entry_l2 l2[0];
- } entries;
-} __packed __aligned(PAGE_SIZE);
-
-int chsc_pnso_brinfo(struct subchannel_id schid,
- struct chsc_pnso_area *brinfo_area,
- struct chsc_brinfo_resume_token resume_token,
- int cnc);
+int chsc_pnso(struct subchannel_id schid,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc);
int __init chsc_get_cssid(int idx);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ccecf6b9504e..963fcc9054c6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -710,6 +710,29 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/**
+ * ccw_device_pnso() - Perform Network-Subchannel Operation
+ * @cdev: device on which PNSO is performed
+ * @pnso_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int ccw_device_pnso(struct ccw_device *cdev,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc)
+{
+ struct subchannel_id schid;
+
+ ccw_device_get_schid(cdev, &schid);
+ return chsc_pnso(schid, pnso_area, resume_token, cnc);
+}
+EXPORT_SYMBOL_GPL(ccw_device_pnso);
+
/*
* Allocate zeroed dma coherent 31 bit addressable memory using
* the subchannels dma pool. Maximal size of allocation supported
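For context, a hedged usage sketch of the new wrapper: the kernel-doc above requires the request/response block to be a zeroed page, and a caller that wants the complete list re-issues the operation with the returned resume token until it comes back zero, just as the old qdio_pnso_brinfo() loop (removed later in this patch) did. The helper name is illustrative, error handling is trimmed, and it assumes the relocated struct chsc_pnso_area keeps the naihdr.resume_token field that the old loop relied on.

/* Hypothetical caller; ccw_device_pnso() and the PNSO structures are
 * assumed to be visible via the headers updated in this series. */
static int example_pnso_walk(struct ccw_device *cdev, int cnc)
{
	struct chsc_pnso_resume_token token = {};
	struct chsc_pnso_area *area;
	int rc;

	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	do {
		rc = ccw_device_pnso(cdev, area, token, cnc);
		if (rc)
			break;
		/* ... consume area->entries here ... */
		token = area->naihdr.resume_token;
	} while (token.t1 || token.t2);
	free_page((unsigned long)area);
	return rc;
}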
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 77d0ea7b381b..45f9c0736be4 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -59,18 +59,6 @@ static inline int idset_contains(struct idset *set, int ssid, int id)
return test_bit(ssid * set->num_id + id, set->bitmap);
}
-static inline int idset_get_first(struct idset *set, int *ssid, int *id)
-{
- int bitnum;
-
- bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
- if (bitnum >= set->num_ssid * set->num_id)
- return 0;
- *ssid = bitnum / set->num_id;
- *id = bitnum % set->num_id;
- return 1;
-}
-
struct idset *idset_sch_new(void)
{
return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b8453b594679..eb13c479e11d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -221,9 +221,6 @@ struct qdio_q {
*/
int first_to_check;
- /* beginning position for calling the program */
- int first_to_kick;
-
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
@@ -292,6 +289,8 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ unsigned int max_input_qs;
+ unsigned int max_output_qs;
void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
unsigned long poll_state;
@@ -364,16 +363,13 @@ static inline int multicast_outbound(struct qdio_q *q)
extern u64 last_ai_time;
/* prototypes for thin interrupt */
-void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_device(struct qdio_irq *irq_ptr);
void tiqdio_remove_device(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
-int tiqdio_allocate_memory(void);
-void tiqdio_free_memory(void);
-int tiqdio_register_thinints(void);
-void tiqdio_unregister_thinints(void);
+int qdio_thinint_init(void);
+void qdio_thinint_exit(void);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
@@ -389,8 +385,10 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_shutdown_irq(struct qdio_irq *irq);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
-void qdio_release_memory(struct qdio_irq *irq_ptr);
+void qdio_free_queues(struct qdio_irq *irq_ptr);
+void qdio_free_async_data(struct qdio_irq *irq_ptr);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index bcc3ab14e72d..610c05f59589 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -143,7 +143,7 @@ again:
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -191,7 +191,7 @@ again:
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -438,15 +438,12 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
q->sbal[start]->element[15].sflags);
}
-static inline void inbound_primed(struct qdio_q *q, unsigned int start,
- int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+ int count, bool auto_ack)
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
- /* for QEBSM the ACK was already set by EQBS */
- if (is_qebsm(q)) {
+ if (auto_ack) {
if (!q->u.in.ack_count) {
q->u.in.ack_count = count;
q->u.in.ack_start = start;
@@ -466,15 +463,14 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
* or by the next inbound run.
*/
new = add_buf(start, count - 1);
- if (q->u.in.ack_count) {
- /* reset the previous ACK but first set the new one */
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
- } else {
- q->u.in.ack_count = 1;
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- }
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
+
+ /* delete the previous ACKs */
+ if (q->u.in.ack_count)
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 1;
q->u.in.ack_start = new;
count--;
if (!count)
@@ -508,19 +504,21 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, start, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+ count);
+
+ inbound_handle_work(q, start, count, is_qebsm(q));
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
return count;
case SLSB_P_INPUT_ERROR:
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+ count);
+
process_buffer_error(q, start, count);
- /*
- * Interrupts may be avoided as long as the error is present
- * so change the buffer state immediately to avoid starvation.
- */
- set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
+ inbound_handle_work(q, start, count, false);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
@@ -624,10 +622,9 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+ unsigned int count)
{
- int start = q->first_to_kick;
-
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
@@ -644,7 +641,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
@@ -668,9 +664,9 @@ static void __qdio_inbound_processing(struct qdio_q *q)
if (count == 0)
return;
+ qdio_kick_handler(q, start, count);
start = add_buf(start, count);
q->first_to_check = start;
- qdio_kick_handler(q, count);
if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
@@ -826,7 +822,7 @@ static void __qdio_outbound_processing(struct qdio_q *q)
count = qdio_outbound_q_moved(q, start);
if (count) {
q->first_to_check = add_buf(start, count);
- qdio_kick_handler(q, count);
+ qdio_kick_handler(q, start, count);
}
if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
@@ -880,47 +876,17 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
qdio_tasklet_schedule(out);
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+void tiqdio_inbound_processing(unsigned long data)
{
- unsigned int start = q->first_to_check;
- int count;
+ struct qdio_q *q = (struct qdio_q *)data;
- qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
/* The interrupt could be caused by a PCI request: */
qdio_check_outbound_pci_queues(q->irq_ptr);
- count = qdio_inbound_q_moved(q, start);
- if (count == 0)
- return;
-
- start = add_buf(start, count);
- q->first_to_check = start;
- qdio_kick_handler(q, count);
-
- if (!qdio_inbound_q_done(q, start)) {
- qperf_inc(q, tasklet_inbound_resched);
- if (!qdio_tasklet_schedule(q))
- return;
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!qdio_inbound_q_done(q, start)) {
- qperf_inc(q, tasklet_inbound_resched2);
- qdio_tasklet_schedule(q);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
- __tiqdio_inbound_processing(q);
+ __qdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -977,7 +943,6 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
- int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -992,9 +957,8 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
goto no_handler;
}
- count = sub_buf(q->first_to_check, q->first_to_kick);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
- q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+ q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
@@ -1154,35 +1118,27 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
/* cleanup subchannel */
spin_lock_irq(get_ccwdev_lock(cdev));
-
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
else
/* default behaviour is halt */
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4d", rc);
goto no_cleanup;
}
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irq(get_ccwdev_lock(cdev));
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
- spin_lock_irq(get_ccwdev_lock(cdev));
no_cleanup:
qdio_shutdown_thinint(irq_ptr);
-
- /* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler) {
- cdev->handler = irq_ptr->orig_handler;
- cdev->private->intparm = 0;
- }
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ qdio_shutdown_irq(irq_ptr);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1213,7 +1169,11 @@ int qdio_free(struct ccw_device *cdev)
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_release_memory(irq_ptr);
+ qdio_free_async_data(irq_ptr);
+ qdio_free_queues(irq_ptr);
+ free_page((unsigned long) irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
@@ -1229,6 +1189,7 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
{
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
+ int rc = -ENOMEM;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
@@ -1240,12 +1201,12 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr)
- goto out_err;
+ return -ENOMEM;
irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
if (qdio_allocate_dbf(irq_ptr))
- goto out_rel;
+ goto err_dbf;
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
no_output_qs);
@@ -1258,24 +1219,30 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
if (!irq_ptr->chsc_page)
- goto out_rel;
+ goto err_chsc;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
- goto out_rel;
+ goto err_qdr;
- if (qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs))
- goto out_rel;
+ rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+ if (rc)
+ goto err_queues;
INIT_LIST_HEAD(&irq_ptr->entry);
cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
-out_rel:
- qdio_release_memory(irq_ptr);
-out_err:
- return -ENOMEM;
+
+err_queues:
+ free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+ free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+ free_page((unsigned long) irq_ptr);
+ return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
@@ -1338,6 +1305,10 @@ int qdio_establish(struct ccw_device *cdev,
if (!irq_ptr)
return -ENODEV;
+ if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+ init_data->no_output_qs > irq_ptr->max_output_qs)
+ return -EINVAL;
+
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
return -EINVAL;
@@ -1352,8 +1323,8 @@ int qdio_establish(struct ccw_device *cdev,
rc = qdio_establish_thinint(irq_ptr);
if (rc) {
+ qdio_shutdown_irq(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
@@ -1371,8 +1342,9 @@ int qdio_establish(struct ccw_device *cdev,
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
+ qdio_shutdown_thinint(irq_ptr);
+ qdio_shutdown_irq(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
@@ -1460,25 +1432,6 @@ out:
}
EXPORT_SYMBOL_GPL(qdio_activate);
-static inline int buf_in_between(int bufnr, int start, int count)
-{
- int end = add_buf(start, count);
-
- if (end > start) {
- if (bufnr >= start && bufnr < end)
- return 1;
- else
- return 0;
- }
-
- /* wrap-around case */
- if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
- (bufnr < end))
- return 1;
- else
- return 0;
-}
-
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
@@ -1489,36 +1442,18 @@ static inline int buf_in_between(int bufnr, int start, int count)
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int diff;
+ int overlap;
qperf_inc(q, inbound_call);
- if (!q->u.in.ack_count)
- goto set;
-
- /* protect against stop polling setting an ACK for an emptied slsb */
- if (count == QDIO_MAX_BUFFERS_PER_Q) {
- /* overwriting everything, just delete polling status */
- q->u.in.ack_count = 0;
- goto set;
- } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
- if (is_qebsm(q)) {
- /* partial overwrite, just update ack_start */
- diff = add_buf(bufnr, count);
- diff = sub_buf(diff, q->u.in.ack_start);
- q->u.in.ack_count -= diff;
- if (q->u.in.ack_count <= 0) {
- q->u.in.ack_count = 0;
- goto set;
- }
- q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
- } else {
- /* the only ACK will be deleted */
- q->u.in.ack_count = 0;
- }
+ /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */
+ overlap = min(count - sub_buf(q->u.in.ack_start, bufnr),
+ q->u.in.ack_count);
+ if (overlap > 0) {
+ q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap);
+ q->u.in.ack_count -= overlap;
}
-set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
atomic_add(count, &q->nr_buf_used);
@@ -1627,7 +1562,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
EXPORT_SYMBOL_GPL(do_QDIO);
/**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
*
* Return codes
@@ -1770,94 +1705,6 @@ int qdio_stop_irq(struct ccw_device *cdev)
}
EXPORT_SYMBOL(qdio_stop_irq);
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid: Subchannel ID.
- * @cnc: Boolean Change-Notification Control
- * @response: Response code will be stored at this address
- * @cb: Callback function will be executed for each element
- * of the address list
- * @priv: Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
- int cnc, u16 *response,
- void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
- void *entry),
- void *priv)
-{
- struct chsc_pnso_area *rr;
- int rc;
- u32 prev_instance = 0;
- int isfirstblock = 1;
- int i, size, elems;
-
- rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
- if (rr == NULL)
- return -ENOMEM;
- do {
- /* on the first iteration, naihdr.resume_token will be zero */
- rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
- if (rc != 0 && rc != -EBUSY)
- goto out;
- if (rr->response.code != 1) {
- rc = -EIO;
- continue;
- } else
- rc = 0;
-
- if (cb == NULL)
- continue;
-
- size = rr->naihdr.naids;
- elems = (rr->response.length -
- sizeof(struct chsc_header) -
- sizeof(struct chsc_brinfo_naihdr)) /
- size;
-
- if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
- /* Inform the caller that they need to scrap */
- /* the data that was already reported via cb */
- rc = -EAGAIN;
- break;
- }
- isfirstblock = 0;
- prev_instance = rr->naihdr.instance;
- for (i = 0; i < elems; i++)
- switch (size) {
- case sizeof(struct qdio_brinfo_entry_l3_ipv6):
- (*cb)(priv, l3_ipv6_addr,
- &rr->entries.l3_ipv6[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l3_ipv4):
- (*cb)(priv, l3_ipv4_addr,
- &rr->entries.l3_ipv4[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l2):
- (*cb)(priv, l2_addr_lnid,
- &rr->entries.l2[i]);
- break;
- default:
- WARN_ON_ONCE(1);
- rc = -EIO;
- goto out;
- }
- } while (rr->response.code == 0x0107 || /* channel busy */
- (rr->response.code == 1 && /* list stored */
- /* resume token is non-zero => list incomplete */
- (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
- (*response) = rr->response.code;
-
-out:
- free_page((unsigned long)rr);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
-
static int __init init_QDIO(void)
{
int rc;
@@ -1868,16 +1715,11 @@ static int __init init_QDIO(void)
rc = qdio_setup_init();
if (rc)
goto out_debug;
- rc = tiqdio_allocate_memory();
+ rc = qdio_thinint_init();
if (rc)
goto out_cache;
- rc = tiqdio_register_thinints();
- if (rc)
- goto out_ti;
return 0;
-out_ti:
- tiqdio_free_memory();
out_cache:
qdio_setup_exit();
out_debug:
@@ -1887,8 +1729,7 @@ out_debug:
static void __exit exit_QDIO(void)
{
- tiqdio_unregister_thinints();
- tiqdio_free_memory();
+ qdio_thinint_exit();
qdio_setup_exit();
qdio_debug_exit();
}
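The reworked handle_inbound() above replaces the buf_in_between() case analysis with a single overlap computation on the 128-entry ring. A stand-alone sketch with local copies of the ring helpers follows; since QDIO_MAX_BUFFERS_PER_Q is a power of two, wrap-around reduces to masking, and when the returned range does not reach ack_start the expression goes non-positive, which is why the new code guards with overlap > 0.

#define QDIO_MAX_BUFFERS_PER_Q 128
#define QDIO_MAX_BUFFERS_MASK  (QDIO_MAX_BUFFERS_PER_Q - 1)

/* local copies of the qdio.h ring helpers */
static inline int add_buf(int bufnr, int inc)
{
	return (bufnr + inc) & QDIO_MAX_BUFFERS_MASK;
}

static inline int sub_buf(int bufnr, int dec)
{
	return (bufnr - dec) & QDIO_MAX_BUFFERS_MASK;
}

/*
 * Worked example: ack_start = 126, ack_count = 3 (ACKs on 126, 127, 0),
 * and the driver returns bufnr = 120, count = 8 (buffers 120..127).
 * sub_buf(126, 120) = 6, so overlap = min(8 - 6, 3) = 2: the ACKs on
 * 126 and 127 go back to the hardware, and the remaining window becomes
 * ack_start = add_buf(126, 2) = 0, ack_count = 1.
 */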
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 3083edd61f0c..2c5cc6ec668e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -135,6 +135,27 @@ output:
}
}
+static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
+{
+ struct qdio_q *q;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ q = queues[i];
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+}
+
+void qdio_free_queues(struct qdio_irq *irq_ptr)
+{
+ __qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
+ irq_ptr->max_input_qs = 0;
+
+ __qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
+ irq_ptr->max_output_qs = 0;
+}
+
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
@@ -142,12 +163,15 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
for (i = 0; i < nr_queues; i++) {
q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
- if (!q)
+ if (!q) {
+ __qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
+ }
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
+ __qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
irq_ptr_qs[i] = q;
@@ -162,8 +186,16 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
if (rc)
return rc;
+
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
- return rc;
+ if (rc) {
+ __qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
+ return rc;
+ }
+
+ irq_ptr->max_input_qs = nr_input_qs;
+ irq_ptr->max_output_qs = nr_output_qs;
+ return 0;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
@@ -347,45 +379,28 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
-void qdio_release_memory(struct qdio_irq *irq_ptr)
+void qdio_free_async_data(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
- /*
- * Must check queue array manually since irq_ptr->nr_input_queues /
- * irq_ptr->nr_input_queues may not yet be set.
- */
- for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
- q = irq_ptr->input_qs[i];
- if (q) {
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
- }
- }
- for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ for (i = 0; i < irq_ptr->max_output_qs; i++) {
q = irq_ptr->output_qs[i];
- if (q) {
- if (q->u.out.use_cq) {
- int n;
-
- for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
- struct qaob *aob = q->u.out.aobs[n];
- if (aob) {
- qdio_release_aob(aob);
- q->u.out.aobs[n] = NULL;
- }
- }
+ if (q->u.out.use_cq) {
+ unsigned int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
+ struct qaob *aob = q->u.out.aobs[n];
- qdio_disable_async_operation(&q->u.out);
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
}
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
+
+ qdio_disable_async_operation(&q->u.out);
}
}
- free_page((unsigned long) irq_ptr->qdr);
- free_page(irq_ptr->chsc_page);
- free_page((unsigned long) irq_ptr);
}
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
@@ -480,7 +495,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
}
setup_qib(irq_ptr, init_data);
- qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
init_data->qib_param_field,
init_data->input_slib_elements,
@@ -491,6 +505,12 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+ /* set our IRQ handler */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ irq_ptr->orig_handler = cdev->handler;
+ cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
/* get qdio commands */
ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
@@ -506,12 +526,18 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
}
irq_ptr->aqueue = *ciw;
- /* set new interrupt handler */
+ return 0;
+}
+
+void qdio_shutdown_irq(struct qdio_irq *irq)
+{
+ struct ccw_device *cdev = irq->cdev;
+
+ /* restore IRQ handler */
spin_lock_irq(get_ccwdev_lock(cdev));
- irq_ptr->orig_handler = cdev->handler;
- cdev->handler = qdio_int_handler;
+ cdev->handler = irq->orig_handler;
+ cdev->private->intparm = 0;
spin_unlock_irq(get_ccwdev_lock(cdev));
- return 0;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ae50373617cd..7a440e4328cd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -197,47 +197,21 @@ out:
return rc;
}
-/* allocate non-shared indicators and shared indicator */
-int __init tiqdio_allocate_memory(void)
-{
- q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
- sizeof(struct indicator_t),
- GFP_KERNEL);
- if (!q_indicators)
- return -ENOMEM;
- return 0;
-}
-
-void tiqdio_free_memory(void)
-{
- kfree(q_indicators);
-}
-
-int __init tiqdio_register_thinints(void)
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
int rc;
- rc = register_adapter_interrupt(&tiqdio_airq);
- if (rc) {
- DBF_EVENT("RTI:%x", rc);
- return rc;
- }
- return 0;
-}
-
-int qdio_establish_thinint(struct qdio_irq *irq_ptr)
-{
if (!is_thinint_irq(irq_ptr))
return 0;
- return set_subchannel_ind(irq_ptr, 0);
-}
-void qdio_setup_thinint(struct qdio_irq *irq_ptr)
-{
- if (!is_thinint_irq(irq_ptr))
- return;
irq_ptr->dsci = get_indicator();
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+
+ rc = set_subchannel_ind(irq_ptr, 0);
+ if (rc)
+ put_indicator(irq_ptr->dsci);
+
+ return rc;
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
@@ -250,8 +224,27 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
put_indicator(irq_ptr->dsci);
}
-void __exit tiqdio_unregister_thinints(void)
+int __init qdio_thinint_init(void)
+{
+ int rc;
+
+ q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
+ GFP_KERNEL);
+ if (!q_indicators)
+ return -ENOMEM;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ kfree(q_indicators);
+ return rc;
+ }
+ return 0;
+}
+
+void __exit qdio_thinint_exit(void)
{
WARN_ON(!list_empty(&tiq_list));
unregister_adapter_interrupt(&tiqdio_airq);
+ kfree(q_indicators);
}
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
new file mode 100644
index 000000000000..a646fc81c872
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel path related status regions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ * Eric Farman <farman@linux.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_schib_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (cio_update_schib(private->sch)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memcpy(region, &private->sch->schib, sizeof(*region));
+
+ if (copy_to_user(buf, (void *)region + pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
+ .read = vfio_ccw_schib_region_read,
+ .write = vfio_ccw_schib_region_write,
+ .release = vfio_ccw_schib_region_release,
+};
+
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_SCHIB,
+ &vfio_ccw_schib_region_ops,
+ sizeof(struct ccw_schib_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->schib_region);
+}
+
+static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_crw_region *region;
+ struct vfio_ccw_crw *crw;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ crw = list_first_entry_or_null(&private->crw,
+ struct vfio_ccw_crw, next);
+
+ if (crw)
+ list_del(&crw->next);
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (crw)
+ memcpy(&region->crw, &crw->crw, sizeof(region->crw));
+
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+
+ region->crw = 0;
+
+ mutex_unlock(&private->io_mutex);
+
+ kfree(crw);
+
+ /* Notify the guest if more CRWs are on our queue */
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+
+ return ret;
+}
+
+static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
+ .read = vfio_ccw_crw_region_read,
+ .write = vfio_ccw_crw_region_write,
+ .release = vfio_ccw_crw_region_release,
+};
+
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_CRW,
+ &vfio_ccw_crw_region_ops,
+ sizeof(struct ccw_crw_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->crw_region);
+}
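On the user side, the new SCHIB region is read-only: after discovering its region index through VFIO_DEVICE_GET_REGION_INFO and the VFIO_REGION_INFO_CAP_TYPE capability chain (discovery elided here), a plain pread() at the reported offset returns a SCHIB snapshot. A hedged userspace sketch:

#include <stdio.h>
#include <unistd.h>
#include <linux/vfio.h>

/* info was filled in for the region whose capability chain reported
 * VFIO_REGION_SUBTYPE_CCW_SCHIB (hypothetical helper). */
static int read_schib_snapshot(int device_fd, struct vfio_region_info *info)
{
	unsigned char buf[256];

	if (info->size > sizeof(buf))
		return -1;
	if (pread(device_fd, buf, info->size, info->offset) !=
	    (ssize_t)info->size) {
		perror("pread schib region");
		return -1;
	}
	/* buf now holds a struct ccw_schib_region snapshot */
	return 0;
}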
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 3645d1720c4b..b9febc581b1f 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -8,6 +8,7 @@
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
*/
+#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
@@ -625,23 +626,27 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
* the target channel program from @orb->cmd.iova to the new ccwchain(s).
*
* Limitations:
- * 1. Supports only prefetch enabled mode.
- * 2. Supports idal(c64) ccw chaining.
- * 3. Supports 4k idaw.
+ * 1. Supports idal(c64) ccw chaining.
+ * 2. Supports 4k idaw.
*
* Returns:
* %0 on success and a negative error value on failure.
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
+ /* custom ratelimit used to avoid flood during guest IPL */
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
/*
- * XXX:
- * Only support prefetch enable mode now.
+ * We only support prefetching the channel program. We assume all channel
+ * programs executed by supported guests likewise support prefetching.
+ * Executing a channel program that does not specify prefetching will
+ * typically not cause an error, but a warning is issued to help identify
+ * the problem if something does break.
*/
- if (!orb->cmd.pfch)
- return -EOPNOTSUPP;
+ if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
+ dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
INIT_LIST_HEAD(&cp->ccwchain_list);
memcpy(&cp->orb, orb, sizeof(*orb));
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 339a6bc0339b..8c625b530035 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -19,6 +19,7 @@
#include <asm/isc.h>
+#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"
@@ -26,6 +27,8 @@
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
+static struct kmem_cache *vfio_ccw_schib_region;
+static struct kmem_cache *vfio_ccw_crw_region;
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
@@ -105,6 +108,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
eventfd_signal(private->io_trigger, 1);
}
+static void vfio_ccw_crw_todo(struct work_struct *work)
+{
+ struct vfio_ccw_private *private;
+
+ private = container_of(work, struct vfio_ccw_private, crw_work);
+
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+}
+
/*
* Css driver callbacks
*/
@@ -116,6 +129,18 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
+static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+{
+ if (private->crw_region)
+ kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+ if (private->schib_region)
+ kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+}
+
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
@@ -147,6 +172,18 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private->cmd_region)
goto out_free;
+ private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->schib_region)
+ goto out_free;
+
+ private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->crw_region)
+ goto out_free;
+
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
mutex_init(&private->io_mutex);
@@ -159,7 +196,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_free;
+ INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+ INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY;
@@ -181,10 +220,7 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- if (private->cmd_region)
- kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
- if (private->io_region)
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ vfio_ccw_free_regions(private);
kfree(private->cp.guest_cp);
kfree(private);
return ret;
@@ -193,15 +229,20 @@ out_free:
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ struct vfio_ccw_crw *crw, *temp;
vfio_ccw_sch_quiesce(sch);
+ list_for_each_entry_safe(crw, temp, &private->crw, next) {
+ list_del(&crw->next);
+ kfree(crw);
+ }
+
vfio_ccw_mdev_unreg(sch);
dev_set_drvdata(&sch->dev, NULL);
- kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ vfio_ccw_free_regions(private);
kfree(private->cp.guest_cp);
kfree(private);
@@ -258,6 +299,83 @@ out_unlock:
return rc;
}
+static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
+ unsigned int rsc,
+ unsigned int erc,
+ unsigned int rsid)
+{
+ struct vfio_ccw_crw *crw;
+
+ /*
+ * If unable to allocate a CRW, just drop the event and
+ * carry on. The guest will either see a later one or
+ * learn when it issues its own store subchannel.
+ */
+ crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
+ if (!crw)
+ return;
+
+ /*
+ * Build the CRW based on the inputs given to us.
+ */
+ crw->crw.rsc = rsc;
+ crw->crw.erc = erc;
+ crw->crw.rsid = rsid;
+
+ list_add_tail(&crw->next, &private->crw);
+ queue_work(vfio_ccw_work_q, &private->crw_work);
+}
+
+static int vfio_ccw_chp_event(struct subchannel *sch,
+ struct chp_link *link, int event)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ int mask = chp_ssd_get_mask(&sch->ssd_info, link);
+ int retry = 255;
+
+ if (!private || !mask)
+ return 0;
+
+ trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
+ VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
+ mdev_uuid(private->mdev), sch->schid.cssid,
+ sch->schid.ssid, sch->schid.sch_no,
+ mask, event);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ switch (event) {
+ case CHP_VARY_OFF:
+ /* Path logically turned off */
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ break;
+ case CHP_OFFLINE:
+ /* Path is gone */
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
+ link->chpid.id);
+ break;
+ case CHP_VARY_ON:
+ /* Path logically turned on */
+ sch->opm |= mask;
+ sch->lpm |= mask;
+ break;
+ case CHP_ONLINE:
+ /* Path became available */
+ sch->lpm |= mask & sch->opm;
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
+ link->chpid.id);
+ break;
+ }
+
+ return 0;
+}
+
static struct css_device_id vfio_ccw_sch_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
@@ -275,6 +393,7 @@ static struct css_driver vfio_ccw_sch_driver = {
.remove = vfio_ccw_sch_remove,
.shutdown = vfio_ccw_sch_shutdown,
.sch_event = vfio_ccw_sch_event,
+ .chp_event = vfio_ccw_chp_event,
};
static int __init vfio_ccw_debug_init(void)
@@ -304,6 +423,14 @@ static void vfio_ccw_debug_exit(void)
debug_unregister(vfio_ccw_debug_trace_id);
}
+static void vfio_ccw_destroy_regions(void)
+{
+ kmem_cache_destroy(vfio_ccw_crw_region);
+ kmem_cache_destroy(vfio_ccw_schib_region);
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+}
+
static int __init vfio_ccw_sch_init(void)
{
int ret;
@@ -336,6 +463,26 @@ static int __init vfio_ccw_sch_init(void)
goto out_err;
}
+ vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
+ sizeof(struct ccw_schib_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_schib_region), NULL);
+
+ if (!vfio_ccw_schib_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
+ sizeof(struct ccw_crw_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_crw_region), NULL);
+
+ if (!vfio_ccw_crw_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
@@ -346,8 +493,7 @@ static int __init vfio_ccw_sch_init(void)
return ret;
out_err:
- kmem_cache_destroy(vfio_ccw_cmd_region);
- kmem_cache_destroy(vfio_ccw_io_region);
+ vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
return ret;
@@ -357,8 +503,7 @@ static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
- kmem_cache_destroy(vfio_ccw_io_region);
- kmem_cache_destroy(vfio_ccw_cmd_region);
+ vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
}
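When the CRW eventfd fires, userspace is expected to drain the queue by reading the CRW region until it returns a zero CRW: the region handler shown earlier pops one entry per read, zeroes the region afterwards, and re-signals while the list is non-empty. A hedged sketch, where region offset and size come from VFIO_DEVICE_GET_REGION_INFO for the VFIO_REGION_SUBTYPE_CCW_CRW region:

#include <stdint.h>
#include <string.h>
#include <unistd.h>

static void example_drain_crws(int device_fd, uint64_t offset, uint64_t size)
{
	uint8_t buf[16]; /* >= sizeof(struct ccw_crw_region) */
	uint32_t crw;

	for (;;) {
		if (size > sizeof(buf) ||
		    pread(device_fd, buf, size, offset) != (ssize_t)size)
			return;
		memcpy(&crw, buf, sizeof(crw));
		if (!crw) /* queue empty */
			return;
		/* ... handle the channel report word ... */
	}
}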
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f0d71ab77c50..8b3ed5b45277 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -172,8 +172,22 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &private->nb);
+ goto out_unregister;
+
+ ret = vfio_ccw_register_schib_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ ret = vfio_ccw_register_crw_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ return ret;
+
+out_unregister:
+ vfio_ccw_unregister_dev_regions(private);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
return ret;
}
@@ -181,7 +195,6 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
- int i;
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
@@ -191,15 +204,9 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
}
cp_free(&private->cp);
+ vfio_ccw_unregister_dev_regions(private);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
-
- for (i = 0; i < private->num_regions; i++)
- private->region[i].ops->release(private, &private->region[i]);
-
- private->num_regions = 0;
- kfree(private->region);
- private->region = NULL;
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -384,17 +391,22 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
- if (info->index != VFIO_CCW_IO_IRQ_INDEX)
+ switch (info->index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ info->count = 1;
+ info->flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
+ default:
return -EINVAL;
-
- info->count = 1;
- info->flags = VFIO_IRQ_INFO_EVENTFD;
+ }
return 0;
}
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
uint32_t flags,
+ uint32_t index,
void __user *data)
{
struct vfio_ccw_private *private;
@@ -404,7 +416,17 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- ctx = &private->io_trigger;
+
+ switch (index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ ctx = &private->io_trigger;
+ break;
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ ctx = &private->crw_trigger;
+ break;
+ default:
+ return -EINVAL;
+ }
switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_NONE:
@@ -482,6 +504,17 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
return 0;
}
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
+{
+ int i;
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd,
unsigned long arg)
@@ -565,7 +598,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
return ret;
data = (void __user *)(arg + minsz);
- return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
+ return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
}
case VFIO_DEVICE_RESET:
return vfio_ccw_mdev_reset(mdev);
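With the new index plumbed through, userspace arms the CRW notifier exactly like the existing I/O IRQ: VFIO_DEVICE_SET_IRQS with an eventfd payload. A hedged sketch, assuming the VFIO_CCW_CRW_IRQ_INDEX constant from the UAPI update in this series:

#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_arm_crw_eventfd(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_CCW_CRW_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
		close(efd);
		return -1;
	}
	return efd; /* poll this fd; a read signals queued CRWs */
}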
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 9b9bb4982972..8723156b29ea 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -17,6 +17,7 @@
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
+#include <asm/crw.h>
#include <asm/debug.h>
#include "css.h"
@@ -53,8 +54,16 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
unsigned int subtype,
const struct vfio_ccw_regops *ops,
size_t size, u32 flags, void *data);
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private);
int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private);
+
+struct vfio_ccw_crw {
+ struct list_head next;
+ struct crw crw;
+};
/**
* struct vfio_ccw_private
@@ -68,6 +77,8 @@ int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
* @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @schib_region: MMIO region for SCHIB information
+ * @crw_region: MMIO region for getting channel report words
* @num_regions: number of additional regions
* @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt
@@ -86,14 +97,19 @@ struct vfio_ccw_private {
struct mutex io_mutex;
struct vfio_ccw_region *region;
struct ccw_cmd_region *cmd_region;
+ struct ccw_schib_region *schib_region;
+ struct ccw_crw_region *crw_region;
int num_regions;
struct channel_program cp;
struct irb irb;
union scsw scsw;
+ struct list_head crw;
struct eventfd_ctx *io_trigger;
+ struct eventfd_ctx *crw_trigger;
struct work_struct io_work;
+ struct work_struct crw_work;
} __aligned(8);
extern int vfio_ccw_mdev_reg(struct subchannel *sch);
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
index 8c671d2519f6..4a0205905afc 100644
--- a/drivers/s390/cio/vfio_ccw_trace.c
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -9,6 +9,7 @@
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index f5d31887d413..62fb30598d47 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -17,6 +17,36 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(vfio_ccw_chp_event,
+ TP_PROTO(struct subchannel_id schid,
+ int mask,
+ int event),
+ TP_ARGS(schid, mask, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, mask)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->mask = mask;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x mask=0x%x event=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->mask,
+ __entry->event)
+);
+
TRACE_EVENT(vfio_ccw_fsm_async_request,
TP_PROTO(struct subchannel_id schid,
int command,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 35064443e748..e71ca4a719a5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -62,8 +62,10 @@ MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
static struct device *ap_root_device;
-DEFINE_SPINLOCK(ap_list_lock);
-LIST_HEAD(ap_card_list);
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
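DEFINE_HASHTABLE(ap_queues, 8) declares a statically sized table with 2^8 = 256 buckets; entries are hashed by key and chained per bucket. A toy sketch of the <linux/hashtable.h> idioms this patch relies on (names are illustrative):

	/* Sketch of the <linux/hashtable.h> idioms used below. */
	DEFINE_HASHTABLE(example_tbl, 3);		/* 2^3 = 8 buckets */

	struct example {
		struct hlist_node hnode;
		int key;
	};

	static void example_usage(struct example *e)
	{
		struct example *cur;
		int bkt;

		hash_add(example_tbl, &e->hnode, e->key);	/* insert, keyed by e->key */
		hash_for_each(example_tbl, bkt, cur, hnode)	/* walk all buckets */
			;
		hash_del(&e->hnode);				/* unlink again */
	}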
@@ -414,7 +416,7 @@ static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
*/
static void ap_tasklet_fn(unsigned long dummy)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
enum ap_wait wait = AP_WAIT_NONE;
@@ -425,34 +427,30 @@ static void ap_tasklet_fn(unsigned long dummy)
if (ap_using_interrupts())
xchg(ap_airq.lsi_ptr, 0);
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- spin_lock_bh(&aq->lock);
- wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
ap_wait(wait);
}
static int ap_pending_requests(void)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- if (aq->queue_count == 0)
- continue;
- spin_unlock_bh(&ap_list_lock);
- return 1;
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_queues_lock);
+ return 1;
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
return 0;
}
@@ -683,24 +681,20 @@ static int ap_device_probe(struct device *dev)
}
/* Add queue/card to list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_add(&to_ap_card(dev)->list, &ap_card_list);
- else
- list_add(&to_ap_queue(dev)->list,
- &to_ap_queue(dev)->card->queues);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+ to_ap_queue(dev)->qid);
+ spin_unlock_bh(&ap_queues_lock);
ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (rc) {
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
ap_dev->drv = NULL;
}
@@ -725,16 +719,33 @@ static int ap_device_remove(struct device *dev)
ap_queue_remove(to_ap_queue(dev));
/* Remove queue/card from list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
return 0;
}
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+ int bkt;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->qid == qid) {
+ get_device(&aq->ap_dev.device);
+ spin_unlock_bh(&ap_queues_lock);
+ return aq;
+ }
+ }
+ spin_unlock_bh(&ap_queues_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
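A caller of the new ap_get_qdev() must balance the reference it takes; a typical use could look like this (a sketch, using the existing AP_MKQID() macro to build a qid from card and domain ids):

	/* Sketch: look up a queue device by qid, drop the reference after use. */
	static void example_use_queue(int card_id, int dom_id)
	{
		struct ap_queue *aq = ap_get_qdev(AP_MKQID(card_id, dom_id));

		if (!aq)
			return;
		/* ... use aq ... */
		put_device(&aq->ap_dev.device);	/* balances get_device() in ap_get_qdev() */
	}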
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
@@ -1506,6 +1517,9 @@ static int __init ap_module_init(void)
return -ENODEV;
}
+ /* init ap_queue hashtable */
+ hash_init(ap_queues);
+
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 8e8e37b6c0ee..053cc34d2ca2 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/hashtable.h>
#include <asm/isc.h>
#include <asm/ap.h>
@@ -27,8 +28,8 @@
extern int ap_domain_index;
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
{
@@ -152,8 +153,6 @@ struct ap_device {
struct ap_card {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP cards. */
- struct list_head queues; /* List of assoc. AP queues */
void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
@@ -166,7 +165,7 @@ struct ap_card {
struct ap_queue {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP queues. */
+ struct hlist_node hnode; /* Node for the ap_queues hashtable */
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
void *private; /* ap driver private pointer. */
@@ -223,12 +222,6 @@ static inline void ap_release_message(struct ap_message *ap_msg)
kzfree(ap_msg->private);
}
-#define for_each_ap_card(_ac) \
- list_for_each_entry(_ac, &ap_card_list, list)
-
-#define for_each_ap_queue(_aq, _ac) \
- list_for_each_entry(_aq, &(_ac)->queues, list)
-
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
@@ -270,6 +263,16 @@ extern struct ap_perms ap_perms;
extern struct mutex ap_perms_mutex;
/*
+ * Get ap_queue device for this qid.
+ * Returns a pointer to the struct ap_queue device, or NULL if no
+ * ap_queue device with this qid was found. On success, the reference
+ * count of the embedded device is increased, so the caller must drop
+ * it after use with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
+
+/*
* check APQN for owned/reserved by ap bus and default driver(s).
* Checks if this APQN is or will be in use by the ap bus
* and the default set of drivers.
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 0a39dfdb6a1d..6588713319ba 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -66,9 +66,9 @@ static ssize_t request_count_show(struct device *dev,
u64 req_cnt;
req_cnt = 0;
- spin_lock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
req_cnt = atomic64_read(&ac->total_request_count);
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
@@ -76,13 +76,15 @@ static ssize_t request_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
+ struct ap_card *ac = to_ap_card(dev);
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- aq->total_request_count = 0;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_queues_lock);
atomic64_set(&ac->total_request_count, 0);
return count;
@@ -93,15 +95,17 @@ static DEVICE_ATTR_RW(request_count);
static ssize_t requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int reqq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
reqq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- reqq_cnt += aq->requestq_count;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
@@ -110,15 +114,17 @@ static DEVICE_ATTR_RO(requestq_count);
static ssize_t pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int penq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
penq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- penq_cnt += aq->pendingq_count;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
@@ -163,11 +169,6 @@ static void ap_card_device_release(struct device *dev)
{
struct ap_card *ac = to_ap_card(dev);
- if (!list_empty(&ac->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&ac->list);
- spin_unlock_bh(&ap_list_lock);
- }
kfree(ac);
}
@@ -179,8 +180,6 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
- INIT_LIST_HEAD(&ac->list);
- INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = comp_type;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 0eaf1d04e8df..73b077dca3e6 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -568,11 +568,10 @@ static void ap_queue_device_release(struct device *dev)
{
struct ap_queue *aq = to_ap_queue(dev);
- if (!list_empty(&aq->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&aq->list);
- spin_unlock_bh(&ap_list_lock);
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_del(&aq->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+
kfree(aq);
}
@@ -590,7 +589,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
- INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
timer_setup(&aq->timeout, ap_request_timeout, 0);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 3850a0f5f0bc..53120e68796e 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -63,12 +63,9 @@ config QETH
prompt "Gigabit Ethernet device support"
depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
help
- This driver supports the IBM System z OSA Express adapters
- in QDIO mode (all media types), HiperSockets interfaces and z/VM
- virtual NICs for Guest LAN and VSWITCH.
-
- For details please refer to the documentation provided by IBM at
- <http://www.ibm.com/developerworks/linux/linux390>
+ This driver supports IBM's OSA Express network adapters in QDIO mode,
+ HiperSockets interfaces and z/VM virtual NICs for Guest LAN and
+ VSWITCH.
To compile this driver as a module, choose M.
The module name is qeth.
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 437a6d822105..d06809eac16d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1698,43 +1698,6 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
put_device(&cgdev->dev);
}
-static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
-
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
- netif_device_detach(priv->channel[CTCM_READ]->netdev);
- ctcm_close(priv->channel[CTCM_READ]->netdev);
- if (!wait_event_timeout(priv->fsm->wait_q,
- fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
- netif_device_attach(priv->channel[CTCM_READ]->netdev);
- return -EBUSY;
- }
- ccw_device_set_offline(gdev->cdev[1]);
- ccw_device_set_offline(gdev->cdev[0]);
- return 0;
-}
-
-static int ctcm_pm_resume(struct ccwgroup_device *gdev)
-{
- struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
- int rc;
-
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
- rc = ccw_device_set_online(gdev->cdev[1]);
- if (rc)
- goto err_out;
- rc = ccw_device_set_online(gdev->cdev[0]);
- if (rc)
- goto err_out;
- ctcm_open(priv->channel[CTCM_READ]->netdev);
-err_out:
- netif_device_attach(priv->channel[CTCM_READ]->netdev);
- return rc;
-}
-
static struct ccw_device_id ctcm_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
@@ -1764,9 +1727,6 @@ static struct ccwgroup_driver ctcm_group_driver = {
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
.set_offline = ctcm_shutdown_device,
- .freeze = ctcm_pm_suspend,
- .thaw = ctcm_pm_resume,
- .restore = ctcm_pm_resume,
};
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c75112ee7b97..c7fade836d83 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
- if (!ism->smcd)
+ if (!ism->smcd) {
+ ret = -ENOMEM;
goto err_resource;
+ }
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 8f08b0a2917c..440219bcaa2b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2296,60 +2296,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
put_device(&ccwgdev->dev);
}
-static int lcs_pm_suspend(struct lcs_card *card)
-{
- if (card->dev)
- netif_device_detach(card->dev);
- lcs_set_allowed_threads(card, 0);
- lcs_wait_for_threads(card, 0xffffffff);
- if (card->state != DEV_STATE_DOWN)
- __lcs_shutdown_device(card->gdev, 1);
- return 0;
-}
-
-static int lcs_pm_resume(struct lcs_card *card)
-{
- int rc = 0;
-
- if (card->state == DEV_STATE_RECOVER)
- rc = lcs_new_device(card->gdev);
- if (card->dev)
- netif_device_attach(card->dev);
- if (rc) {
- dev_warn(&card->gdev->dev, "The lcs device driver "
- "failed to recover the device\n");
- }
- return rc;
-}
-
-static int lcs_prepare(struct ccwgroup_device *gdev)
-{
- return 0;
-}
-
-static void lcs_complete(struct ccwgroup_device *gdev)
-{
- return;
-}
-
-static int lcs_freeze(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_suspend(card);
-}
-
-static int lcs_thaw(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_resume(card);
-}
-
-static int lcs_restore(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_resume(card);
-}
-
static struct ccw_device_id lcs_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
@@ -2382,11 +2328,6 @@ static struct ccwgroup_driver lcs_group_driver = {
.remove = lcs_remove_device,
.set_online = lcs_new_device,
.set_offline = lcs_shutdown_device,
- .prepare = lcs_prepare,
- .complete = lcs_complete,
- .freeze = lcs_freeze,
- .thaw = lcs_thaw,
- .restore = lcs_restore,
};
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5ce2424ca729..260860cf3aa1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -112,27 +112,10 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
*/
#define PRINTK_HEADER " iucv: " /* for debugging */
-/* dummy device to make sure netiucv_pm functions are called */
-static struct device *netiucv_dev;
-
-static int netiucv_pm_prepare(struct device *);
-static void netiucv_pm_complete(struct device *);
-static int netiucv_pm_freeze(struct device *);
-static int netiucv_pm_restore_thaw(struct device *);
-
-static const struct dev_pm_ops netiucv_pm_ops = {
- .prepare = netiucv_pm_prepare,
- .complete = netiucv_pm_complete,
- .freeze = netiucv_pm_freeze,
- .thaw = netiucv_pm_restore_thaw,
- .restore = netiucv_pm_restore_thaw,
-};
-
static struct device_driver netiucv_driver = {
.owner = THIS_MODULE,
.name = "netiucv",
.bus = &iucv_bus,
- .pm = &netiucv_pm_ops,
};
static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
@@ -213,7 +196,6 @@ struct netiucv_priv {
fsm_instance *fsm;
struct iucv_connection *conn;
struct device *dev;
- int pm_state;
};
/**
@@ -1275,72 +1257,6 @@ static int netiucv_close(struct net_device *dev)
return 0;
}
-static int netiucv_pm_prepare(struct device *dev)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- return 0;
-}
-
-static void netiucv_pm_complete(struct device *dev)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- return;
-}
-
-/**
- * netiucv_pm_freeze() - Freeze PM callback
- * @dev: netiucv device
- *
- * close open netiucv interfaces
- */
-static int netiucv_pm_freeze(struct device *dev)
-{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = NULL;
- int rc = 0;
-
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (priv && priv->conn)
- ndev = priv->conn->netdev;
- if (!ndev)
- goto out;
- netif_device_detach(ndev);
- priv->pm_state = fsm_getstate(priv->fsm);
- rc = netiucv_close(ndev);
-out:
- return rc;
-}
-
-/**
- * netiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: netiucv device
- *
- * re-open netiucv interfaces closed during freeze
- */
-static int netiucv_pm_restore_thaw(struct device *dev)
-{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = NULL;
- int rc = 0;
-
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (priv && priv->conn)
- ndev = priv->conn->netdev;
- if (!ndev)
- goto out;
- switch (priv->pm_state) {
- case DEV_STATE_RUNNING:
- case DEV_STATE_STARTWAIT:
- rc = netiucv_open(ndev);
- break;
- default:
- break;
- }
- netif_device_attach(ndev);
-out:
- return rc;
-}
-
/**
* Start transmission of a packet.
* Called from generic network device layer.
@@ -2156,7 +2072,6 @@ static void __exit netiucv_exit(void)
netiucv_unregister_device(dev);
}
- device_unregister(netiucv_dev);
driver_unregister(&netiucv_driver);
iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
@@ -2182,27 +2097,10 @@ static int __init netiucv_init(void)
IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
goto out_iucv;
}
- /* establish dummy device */
- netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!netiucv_dev) {
- rc = -ENOMEM;
- goto out_driver;
- }
- dev_set_name(netiucv_dev, "netiucv");
- netiucv_dev->bus = &iucv_bus;
- netiucv_dev->parent = iucv_root;
- netiucv_dev->release = (void (*)(struct device *))kfree;
- netiucv_dev->driver = &netiucv_driver;
- rc = device_register(netiucv_dev);
- if (rc) {
- put_device(netiucv_dev);
- goto out_driver;
- }
+
netiucv_banner();
return rc;
-out_driver:
- driver_unregister(&netiucv_driver);
out_iucv:
iucv_unregister(&netiucv_handler, 1);
out_dbf:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e0b26310ecab..51ea56b73a97 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -11,6 +11,7 @@
#define __QETH_CORE_H__
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,8 +22,10 @@
#include <linux/seq_file.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -31,6 +34,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/route.h>
#include <net/sch_generic.h>
#include <net/tcp.h>
@@ -231,11 +235,7 @@ struct qeth_hdr_layer3 {
__u16 frame_offset;
union {
/* TX: */
- struct in6_addr ipv6_addr;
- struct ipv4 {
- u8 res[12];
- u32 addr;
- } ipv4;
+ struct in6_addr addr;
/* RX: */
struct rx {
u8 res1[2];
@@ -352,10 +352,15 @@ static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
struct qeth_hdr_layer3 *h2)
{
return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
- ipv6_addr_equal(&h1->next_hop.ipv6_addr,
- &h2->next_hop.ipv6_addr);
+ ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
}
+struct qeth_local_addr {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct in6_addr addr;
+};
+
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -688,6 +693,9 @@ struct qeth_card_info {
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
+ /* no bitfield, we take a pointer to these two: */
+ u8 has_lp2lp_cso_v6;
+ u8 has_lp2lp_cso_v4;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
@@ -786,6 +794,7 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
+ struct dentry *debugfs;
struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
@@ -797,6 +806,10 @@ struct qeth_card {
wait_queue_head_t wait_q;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ DECLARE_HASHTABLE(local_addrs4, 4);
+ DECLARE_HASHTABLE(local_addrs6, 4);
+ spinlock_t local_addrs4_lock;
+ spinlock_t local_addrs6_lock;
struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
struct work_struct rx_mode_work;
@@ -928,6 +941,25 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
+static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+}
+
+static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+ return &rt->rt6i_gateway;
+ else
+ return &ipv6_hdr(skb)->daddr;
+}
+
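Both helpers dereference routing state, so they are only safe under rcu_read_lock(); a caller pattern consistent with how this patch uses them later (e.g. in the local-next-hop checks) would be:

	/* Sketch: resolving the IPv4 next hop under RCU protection. */
	static __be32 example_next_hop(struct sk_buff *skb)
	{
		__be32 next_hop;

		rcu_read_lock();
		next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
		rcu_read_unlock();
		return next_hop;
	}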
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1021,7 +1053,8 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-void qeth_schedule_recovery(struct qeth_card *);
+int qeth_schedule_recovery(struct qeth_card *card);
+void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f7689461c242..18a0fb75a710 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -26,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -60,6 +61,7 @@ EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
+static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
@@ -623,6 +625,257 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
+static void qeth_flush_local_addrs4(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs4_lock);
+ hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs4_lock);
+}
+
+static void qeth_flush_local_addrs6(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs6_lock);
+ hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs6_lock);
+}
+
+void qeth_flush_local_addrs(struct qeth_card *card)
+{
+ qeth_flush_local_addrs4(card);
+ qeth_flush_local_addrs6(card);
+}
+EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
+
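The flush helpers combine hash_for_each_safe() (safe against removal while iterating) with hash_del_rcu()/kfree_rcu(): an entry disappears from the table immediately, but its memory is freed only after a grace period, because lockless readers may still be traversing it. A sketch of such a reader (the patch adds essentially this logic as qeth_next_hop_is_local_v4() further down):

	/* Sketch: the lockless reader that kfree_rcu() above protects. */
	static bool example_is_local(struct qeth_card *card, __be32 next_hop)
	{
		struct qeth_local_addr *tmp;
		bool found = false;

		rcu_read_lock();
		hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode,
					   ipv4_addr_hash(next_hop)) {
			if (tmp->addr.s6_addr32[3] == next_hop) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}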
+static void qeth_add_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
+ if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
+ hash_add_rcu(card->local_addrs4, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_add_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
+ if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ addr->addr = cmd->addrs[i].addr;
+ hash_add_rcu(card->local_addrs6, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static void qeth_del_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
+ unsigned int key = ipv4_addr_hash(addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == addr->addr) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_del_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
+ u32 key = ipv6_addr_hash(&addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ bool is_local = false;
+ unsigned int key;
+ __be32 next_hop;
+
+ if (hash_empty(card->local_addrs4))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ key = ipv4_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == next_hop) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ struct in6_addr *next_hop;
+ bool is_local = false;
+ u32 key;
+
+ if (hash_empty(card->local_addrs6))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ key = ipv6_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, next_hop)) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
+{
+ struct qeth_card *card = m->private;
+ struct qeth_local_addr *tmp;
+ unsigned int i;
+
+ rcu_read_lock();
+ hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
+ seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
+ hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
+ seq_printf(m, "%pI6c\n", &tmp->addr);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
+
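DEFINE_SHOW_ATTRIBUTE() generates the single_open() boilerplate and yields a qeth_debugfs_local_addr_fops, which the allocation path wires up under the new per-card debugfs directory further down in this patch:

	/* Sketch: how the generated fops pairs with debugfs (see qeth_alloc_card). */
	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev), qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);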
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
@@ -686,9 +939,19 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "irla");
return NULL;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "urla");
return NULL;
default:
@@ -868,16 +1131,18 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
+ int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- if (!(card->thread_allowed_mask & thread) ||
- (card->thread_start_mask & thread)) {
- spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return -EPERM;
- }
- card->thread_start_mask |= thread;
+ if (!(card->thread_allowed_mask & thread))
+ rc = -EPERM;
+ else if (card->thread_start_mask & thread)
+ rc = -EBUSY;
+ else
+ card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return 0;
+
+ return rc;
}
static void qeth_clear_thread_start_bit(struct qeth_card *card,
@@ -930,11 +1195,17 @@ static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-void qeth_schedule_recovery(struct qeth_card *card)
+int qeth_schedule_recovery(struct qeth_card *card)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "startrec");
- if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+
+ rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
+ if (!rc)
schedule_work(&card->kernel_thread_starter);
+
+ return rc;
}
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
@@ -1376,6 +1647,10 @@ static void qeth_setup_card(struct qeth_card *card)
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
+ hash_init(card->local_addrs4);
+ hash_init(card->local_addrs6);
+ spin_lock_init(&card->local_addrs4_lock);
+ spin_lock_init(&card->local_addrs6_lock);
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1412,6 +1687,11 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
+ card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
+ qeth_debugfs_root);
+ debugfs_create_file("local_addrs", 0400, card->debugfs, card,
+ &qeth_debugfs_local_addr_fops);
+
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -3345,11 +3625,11 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
+ struct qeth_qdio_out_buffer *buf = queue->bufs[index];
+ unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
struct qeth_card *card = queue->card;
- struct qeth_qdio_out_buffer *buf;
int rc;
int i;
- unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
@@ -3366,9 +3646,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (IS_IQD(card)) {
skb_queue_walk(&buf->skb_list, skb)
skb_tx_timestamp(skb);
- continue;
}
+ }
+ if (!IS_IQD(card)) {
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
(QETH_HIGH_WATERMARK_PACK -
@@ -3393,12 +3674,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
+
+ if (atomic_read(&queue->set_pci_flags_count))
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
}
QETH_TXQ_STAT_INC(queue, doorbell);
- qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
- if (atomic_read(&queue->set_pci_flags_count))
- qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
@@ -3809,15 +4090,47 @@ static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
-static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
- struct qeth_qdio_out_buffer *buf,
- bool is_first_elem, unsigned int offset)
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @buf: buffer to transport the skb
+ * @skb: skb to map into the buffer
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
+ * from qeth_core_header_cache.
+ * @offset: when mapping the skb, start at skb->data + offset
+ * @hd_len: if > 0, build a dedicated header element of this size
+ */
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
unsigned int elem_length, cnt;
+ bool is_first_elem = true;
+
+ __skb_queue_tail(&buf->skb_list, skb);
+
+ /* build dedicated element for HW Header */
+ if (hd_len) {
+ is_first_elem = false;
+
+ buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].length = hd_len;
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+
+ /* HW header is allocated from cache: */
+ if ((void *)hdr != skb->data)
+ buf->is_header[element] = 1;
+ /* HW header was pushed and is contiguous with linear part: */
+ else if (length > 0 && !PAGE_ALIGNED(data) &&
+ (data == (char *)hdr + hd_len))
+ buffer->element[element].eflags |=
+ SBAL_EFLAGS_CONTIGUOUS;
+
+ element++;
+ }
/* map linear part into buffer element(s) */
while (length > 0) {
@@ -3871,40 +4184,6 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
return element;
}
-/**
- * qeth_fill_buffer() - map skb into an output buffer
- * @buf: buffer to transport the skb
- * @skb: skb to map into the buffer
- * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
- * from qeth_core_header_cache.
- * @offset: when mapping the skb, start at skb->data + offset
- * @hd_len: if > 0, build a dedicated header element of this size
- */
-static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
-{
- struct qdio_buffer *buffer = buf->buffer;
- bool is_first_elem = true;
-
- __skb_queue_tail(&buf->skb_list, skb);
-
- /* build dedicated header element */
- if (hd_len) {
- int element = buf->next_element_to_fill;
- is_first_elem = false;
-
- buffer->element[element].addr = virt_to_phys(hdr);
- buffer->element[element].length = hd_len;
- buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
- /* remember to free cache-allocated qeth_hdr: */
- buf->is_header[element] = ((void *)hdr != skb->data);
- buf->next_element_to_fill++;
- }
-
- return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
-}
-
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, unsigned int elements,
struct qeth_hdr *hdr, unsigned int offset,
@@ -4889,9 +5168,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
+
+ unregister_service_level(&card->qeth_service_level);
+ debugfs_remove_recursive(card->debugfs);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
}
@@ -6153,32 +6434,6 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
qdio_free(CARD_DDEV(card));
}
-static int qeth_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_set_offline(card, false);
- return 0;
-}
-
-static int qeth_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_set_online(card);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
- return rc;
-}
-
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
@@ -6215,11 +6470,6 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.set_online = qeth_core_set_online,
.set_offline = qeth_core_set_offline,
.shutdown = qeth_core_shutdown,
- .prepare = NULL,
- .complete = NULL,
- .freeze = qeth_suspend,
- .thaw = qeth_resume,
- .restore = qeth_resume,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
@@ -6300,7 +6550,7 @@ static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_cmd_buffer *iob;
@@ -6352,18 +6602,17 @@ static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
- if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
- cstype == IPA_OUTBOUND_CHECKSUM)
- dev_warn(&card->gdev->dev,
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
- QETH_CARD_IFNAME(card));
+
+ if (lp2lp)
+ *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
+
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
- return on ? qeth_set_csum_on(card, cstype, prot) :
+ return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
qeth_set_csum_off(card, cstype, prot);
}
@@ -6451,13 +6700,13 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV4);
+ QETH_PROT_IPV4, NULL);
if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
/* no/one Offload Assist available, so the rc is trivial */
return rc_ipv4;
rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV6);
+ QETH_PROT_IPV6, NULL);
if (on)
/* enable: success if any Assist is active */
@@ -6493,6 +6742,24 @@ void qeth_enable_hw_features(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
+static void qeth_check_restricted_features(struct qeth_card *card,
+ netdev_features_t changed,
+ netdev_features_t actual)
+{
+ netdev_features_t ipv6_features = NETIF_F_TSO6;
+ netdev_features_t ipv4_features = NETIF_F_TSO;
+
+ if (!card->info.has_lp2lp_cso_v6)
+ ipv6_features |= NETIF_F_IPV6_CSUM;
+ if (!card->info.has_lp2lp_cso_v4)
+ ipv4_features |= NETIF_F_IP_CSUM;
+
+ if ((changed & ipv6_features) && !(actual & ipv6_features))
+ qeth_flush_local_addrs6(card);
+ if ((changed & ipv4_features) && !(actual & ipv4_features))
+ qeth_flush_local_addrs4(card);
+}
+
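The two masks passed in are XOR deltas: "changed" is what the caller asked to toggle, "actual" is what really toggled after the IPA calls corrected the request on failure. A restricted feature is flushed only when it was requested off and did go off; the call site added below in qeth_set_features() reads:

	/* Sketch: the call site added in qeth_set_features() below. */
	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);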
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -6504,13 +6771,15 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
+ &card->info.has_lp2lp_cso_v4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
if (changed & NETIF_F_IPV6_CSUM) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
+ &card->info.has_lp2lp_cso_v6);
if (rc)
changed ^= NETIF_F_IPV6_CSUM;
}
@@ -6532,6 +6801,9 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
changed ^= NETIF_F_TSO6;
}
+ qeth_check_restricted_features(card, dev->features ^ features,
+ dev->features ^ changed);
+
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
return 0;
@@ -6568,6 +6840,34 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ /* Traffic with local next-hop is not eligible for some offloads: */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t restricted = 0;
+
+ if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
+ restricted |= NETIF_F_ALL_TSO;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ if (!card->info.has_lp2lp_cso_v4)
+ restricted |= NETIF_F_IP_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v4(card, skb))
+ features &= ~restricted;
+ break;
+ case htons(ETH_P_IPV6):
+ if (!card->info.has_lp2lp_cso_v6)
+ restricted |= NETIF_F_IPV6_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v6(card, skb))
+ features &= ~restricted;
+ break;
+ default:
+ break;
+ }
+ }
+
/* GSO segmentation builds skbs with
* a (small) linear part for the headers, and
* page frags for the data.
@@ -6717,17 +7017,17 @@ int qeth_stop(struct net_device *dev)
unsigned int i;
/* Quiesce the NAPI instances: */
- qeth_for_each_output_queue(card, queue, i) {
+ qeth_for_each_output_queue(card, queue, i)
napi_disable(&queue->napi);
- del_timer_sync(&queue->timer);
- }
/* Stop .ndo_start_xmit, might still access queue->napi. */
netif_tx_disable(dev);
- /* Queues may get re-allocated, so remove the NAPIs here. */
- qeth_for_each_output_queue(card, queue, i)
+ qeth_for_each_output_queue(card, queue, i) {
+ del_timer_sync(&queue->timer);
+ /* Queues may get re-allocated, so remove the NAPIs. */
netif_napi_del(&queue->napi);
+ }
} else {
netif_tx_disable(dev);
}
@@ -6745,6 +7045,8 @@ static int __init qeth_core_init(void)
pr_info("loading core functions\n");
+ qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
+
rc = qeth_register_dbf_views();
if (rc)
goto dbf_err;
@@ -6786,6 +7088,7 @@ slab_err:
register_err:
qeth_unregister_dbf_views();
dbf_err:
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -6799,6 +7102,7 @@ static void __exit qeth_core_exit(void)
kmem_cache_destroy(qeth_core_header_cache);
root_device_unregister(qeth_core_root_dev);
qeth_unregister_dbf_views();
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_info("core functions removed\n");
}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index d89a04bfd8b0..9d6f39d8f9ab 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -772,6 +772,29 @@ struct qeth_ipacmd_addr_change {
struct qeth_ipacmd_addr_change_entry entry[];
} __packed;
+/* [UN]REGISTER_LOCAL_ADDRESS notifications */
+struct qeth_ipacmd_local_addr4 {
+ __be32 addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs4 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr4 addrs[];
+};
+
+struct qeth_ipacmd_local_addr6 {
+ struct in6_addr addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs6 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr6 addrs[];
+};
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -803,6 +826,8 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_setbridgeport sbp;
struct qeth_ipacmd_addr_change addrchange;
struct qeth_ipacmd_vnicc vnicc;
+ struct qeth_ipacmd_local_addrs4 local_addrs4;
+ struct qeth_ipacmd_local_addrs6 local_addrs6;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d7e429f6631e..c901c942fed7 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -275,17 +275,20 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i;
+ bool reset;
+ int rc;
+
+ rc = kstrtobool(buf, &reset);
+ if (rc)
+ return rc;
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 16);
- if (i == 1)
- qeth_schedule_recovery(card);
+ if (reset)
+ rc = qeth_schedule_recovery(card);
- return count;
+ return rc ? rc : count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
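kstrtobool() replaces the hex simple_strtoul() parse; it accepts "1"/"0", "y"/"n" (any case) and "on"/"off", and returns -EINVAL for anything else, so malformed input now fails the write instead of being silently ignored:

	/* Sketch: kstrtobool() semantics relied on above. */
	bool v;
	int rc;

	rc = kstrtobool("1", &v);	/* rc == 0, v == true  */
	rc = kstrtobool("off", &v);	/* rc == 0, v == false */
	rc = kstrtobool("2", &v);	/* rc == -EINVAL       */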
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0bd5b09e7a22..2d3bca3c0141 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
+#include <asm/chsc.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -27,8 +28,8 @@
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
-static void qeth_bridge_host_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
+static void qeth_addr_change_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
static void qeth_l2_vnicc_init(struct qeth_card *card);
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
@@ -291,6 +292,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -628,6 +630,72 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
schedule_work(&card->rx_mode_work);
}
+/**
+ * qeth_l2_pnso() - perform network subchannel operation
+ * @card: qeth_card structure pointer
+ * @cnc: boolean change-notification control flag
+ * @cb: callback function, executed for each element of the
+ * address list
+ * @priv: pointer that is passed to the callback function
+ *
+ * Collects network information into a network address list and calls the
+ * callback function for every entry in the list. If change-notification
+ * control is set, further changes in the address list will be reported
+ * via the IPA command.
+ */
+static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+ void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
+ void *priv)
+{
+ struct ccw_device *ddev = CARD_DDEV(card);
+ struct chsc_pnso_area *rr;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "PNSO");
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+ if (rc)
+ continue;
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ if (size != sizeof(struct chsc_pnso_naid_l2)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ elems = (rr->response.length - sizeof(struct chsc_header) -
+ sizeof(struct chsc_pnso_naihdr)) / size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /*
+ * Inform the caller that they need to scrap the data
+ * that was already reported via the callback.
+ */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ (*cb)(priv, &rr->entries[i]);
+ } while ((rc == -EBUSY) || (!rc && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
+
+ free_page((unsigned long)rr);
+ return rc;
+}
+
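The resume_token loop keeps reissuing the CHSC until the hardware reports a complete address list; a caller only supplies a per-entry callback, as the reworked qeth_bridgeport_an_set() does further down:

	/* Sketch: enabling address notification via the new helper (see below). */
	rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);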
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -709,6 +777,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+ netif_keep_dst(card->dev);
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
@@ -854,7 +923,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
} else
return 1;
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- qeth_bridge_host_event(card, cmd);
+ qeth_addr_change_event(card, cmd);
return 0;
default:
return 1;
@@ -971,8 +1040,10 @@ enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
* for all currently registered addresses.
*/
static void qeth_bridge_emit_host_event(struct qeth_card *card,
- enum qeth_an_event_type evtype,
- u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+ enum qeth_an_event_type evtype,
+ u8 code,
+ struct net_if_token *token,
+ struct mac_addr_lnid *addr_lnid)
{
char str[7][32];
char *env[8];
@@ -1089,74 +1160,76 @@ static void qeth_bridge_state_change(struct qeth_card *card,
queue_work(card->event_wq, &data->worker);
}
-struct qeth_bridge_host_data {
+struct qeth_addr_change_data {
struct work_struct worker;
struct qeth_card *card;
- struct qeth_ipacmd_addr_change hostevs;
+ struct qeth_ipacmd_addr_change ac_event;
};
-static void qeth_bridge_host_event_worker(struct work_struct *work)
+static void qeth_addr_change_event_worker(struct work_struct *work)
{
- struct qeth_bridge_host_data *data =
- container_of(work, struct qeth_bridge_host_data, worker);
+ struct qeth_addr_change_data *data =
+ container_of(work, struct qeth_addr_change_data, worker);
int i;
- if (data->hostevs.lost_event_mask) {
+ QETH_CARD_TEXT(data->card, 4, "adrchgew");
+ if (data->ac_event.lost_event_mask) {
dev_info(&data->card->gdev->dev,
-"Address notification from the Bridge Port stopped %s (%s)\n",
- data->card->dev->name,
- (data->hostevs.lost_event_mask == 0x01)
+ "Address change notification stopped on %s (%s)\n",
+ data->card->dev->name,
+ (data->ac_event.lost_event_mask == 0x01)
? "Overflow"
- : (data->hostevs.lost_event_mask == 0x02)
+ : (data->ac_event.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.hostnotification = 0;
mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
- 0, NULL, NULL);
+ 0, NULL, NULL);
} else
- for (i = 0; i < data->hostevs.num_entries; i++) {
+ for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
- &data->hostevs.entry[i];
+ &data->ac_event.entry[i];
qeth_bridge_emit_host_event(data->card,
- anev_reg_unreg,
- entry->change_code,
- &entry->token, &entry->addr_lnid);
+ anev_reg_unreg,
+ entry->change_code,
+ &entry->token,
+ &entry->addr_lnid);
}
kfree(data);
}
-static void qeth_bridge_host_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd)
+static void qeth_addr_change_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
{
struct qeth_ipacmd_addr_change *hostevs =
&cmd->data.addrchange;
- struct qeth_bridge_host_data *data;
+ struct qeth_addr_change_data *data;
int extrasize;
- QETH_CARD_TEXT(card, 2, "brhostev");
+ QETH_CARD_TEXT(card, 4, "adrchgev");
if (cmd->hdr.return_code != 0x0000) {
if (cmd->hdr.return_code == 0x0010) {
if (hostevs->lost_event_mask == 0x00)
hostevs->lost_event_mask = 0xff;
} else {
- QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+ QETH_CARD_TEXT_(card, 2, "ACHN%04x",
cmd->hdr.return_code);
return;
}
}
extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
hostevs->num_entries;
- data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
- GFP_ATOMIC);
+ data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
+ GFP_ATOMIC);
if (!data) {
- QETH_CARD_TEXT(card, 2, "BPHalloc");
+ QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
- INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+ INIT_WORK(&data->worker, qeth_addr_change_event_worker);
data->card = card;
- memcpy(&data->hostevs, hostevs,
+ memcpy(&data->ac_event, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
queue_work(card->event_wq, &data->worker);
}
@@ -1446,63 +1519,18 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
-/**
- * qeth_anset_makerc() - derive "traditional" error from hardware codes.
- * @card: qeth_card structure pointer, for debug messages.
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
-static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
-{
- int rc;
-
- if (pnso_rc == 0)
- switch (response) {
- case 0x0001:
- rc = 0;
- break;
- case 0x0004:
- case 0x0100:
- case 0x0106:
- rc = -EOPNOTSUPP;
- dev_err(&card->gdev->dev,
- "Setting address notification failed\n");
- break;
- case 0x0107:
- rc = -EAGAIN;
- break;
- default:
- rc = -EIO;
- }
- else
- rc = -EIO;
-
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
- QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
- }
- return rc;
-}
-
static void qeth_bridgeport_an_set_cb(void *priv,
- enum qdio_brinfo_entry_type type, void *entry)
+ struct chsc_pnso_naid_l2 *entry)
{
struct qeth_card *card = (struct qeth_card *)priv;
- struct qdio_brinfo_entry_l2 *l2entry;
u8 code;
- if (type != l2_addr_lnid) {
- WARN_ON_ONCE(1);
- return;
- }
-
- l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid < VLAN_N_VID)
+ if (entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
- (struct net_if_token *)&l2entry->nit,
- (struct mac_addr_lnid *)&l2entry->addr_lnid);
+ (struct net_if_token *)&entry->nit,
+ (struct mac_addr_lnid *)&entry->addr_lnid);
}
/**
@@ -1518,22 +1546,16 @@ static void qeth_bridgeport_an_set_cb(void *priv,
int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
{
int rc;
- u16 response;
- struct ccw_device *ddev;
- struct subchannel_id schid;
if (!card->options.sbp.supported_funcs)
return -EOPNOTSUPP;
- ddev = CARD_DDEV(card);
- ccw_device_get_schid(ddev, &schid);
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
- rc = qdio_pnso_brinfo(schid, 1, &response,
- qeth_bridgeport_an_set_cb, card);
+ rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
} else
- rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
- return qeth_anset_makerc(card, rc, response);
+ rc = qeth_l2_pnso(card, 0, NULL, NULL);
+ return rc;
}
static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0742a749d26e..1e50aa0297a3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1176,6 +1176,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -1693,8 +1694,8 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
if (skb->protocol == htons(ETH_P_AF_IUCV)) {
l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
- memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+ l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
return;
}
@@ -1728,18 +1729,10 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
if (ipv == 4) {
- struct rtable *rt = (struct rtable *) dst;
-
- *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
- rt_nexthop(rt, ip_hdr(skb)->daddr) :
- ip_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr.s6_addr32[3] =
+ qeth_next_hop_v4_rcu(skb, dst);
} else if (ipv == 6) {
- struct rt6_info *rt = (struct rt6_info *) dst;
-
- if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
- else
- l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
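
The two next-hop helpers used above are not shown in this hunk; a minimal sketch of what they are assumed to look like, simply mirroring the inline logic removed here:

	static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
						  struct dst_entry *dst)
	{
		struct rtable *rt = (struct rtable *) dst;

		/* prefer the route's gateway, else the packet's destination */
		return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) :
			      ip_hdr(skb)->daddr;
	}

	static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
							    struct dst_entry *dst)
	{
		struct rt6_info *rt = (struct rt6_info *) dst;

		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
			return &rt->rt6i_gateway;
		return &ipv6_hdr(skb)->daddr;
	}
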
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 066b5c3aaae6..c84ec2fbf99b 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -29,12 +29,9 @@ MODULE_AUTHOR
MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
static struct iucv_path *smsg_path;
-/* dummy device used as trigger for PM functions */
-static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
-static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -124,60 +121,15 @@ void smsg_unregister_callback(const char *prefix,
kfree(cb);
}
-static int smsg_pm_freeze(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "smsg_pm_freeze\n");
-#endif
- if (smsg_path && iucv_path_connected) {
- iucv_path_sever(smsg_path, NULL);
- iucv_path_connected = 0;
- }
- return 0;
-}
-
-static int smsg_pm_restore_thaw(struct device *dev)
-{
- int rc;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "smsg_pm_restore_thaw\n");
-#endif
- if (smsg_path && !iucv_path_connected) {
- memset(smsg_path, 0, sizeof(*smsg_path));
- smsg_path->msglim = 255;
- smsg_path->flags = 0;
- rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
- NULL, NULL, NULL);
-#ifdef CONFIG_PM_DEBUG
- if (rc)
- printk(KERN_ERR
- "iucv_path_connect returned with rc %i\n", rc);
-#endif
- if (!rc)
- iucv_path_connected = 1;
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
- }
- return 0;
-}
-
-static const struct dev_pm_ops smsg_pm_ops = {
- .freeze = smsg_pm_freeze,
- .thaw = smsg_pm_restore_thaw,
- .restore = smsg_pm_restore_thaw,
-};
-
static struct device_driver smsg_driver = {
.owner = THIS_MODULE,
.name = SMSGIUCV_DRV_NAME,
.bus = &iucv_bus,
- .pm = &smsg_pm_ops,
};
static void __exit smsg_exit(void)
{
cpcmd("SET SMSG OFF", NULL, 0, NULL);
- device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
}
@@ -205,27 +157,10 @@ static int __init smsg_init(void)
NULL, NULL, NULL);
if (rc)
goto out_free_path;
- else
- iucv_path_connected = 1;
- smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!smsg_dev) {
- rc = -ENOMEM;
- goto out_free_path;
- }
- dev_set_name(smsg_dev, "smsg_iucv");
- smsg_dev->bus = &iucv_bus;
- smsg_dev->parent = iucv_root;
- smsg_dev->release = (void (*)(struct device *))kfree;
- smsg_dev->driver = &smsg_driver;
- rc = device_register(smsg_dev);
- if (rc)
- goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
-out_put:
- put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 09ec846fe01d..18b713a616de 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
/*
@@ -415,8 +415,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
- if (!zfcp_scsi_adapter_register(adapter))
- return adapter;
+ return adapter;
failed:
zfcp_adapter_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index b9c93d15f67c..3852367f15f6 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -4,7 +4,7 @@
*
 * Definitions for handling diagnostics in the zfcp device driver.
*
- * Copyright IBM Corp. 2018
+ * Copyright IBM Corp. 2018, 2020
*/
#ifndef ZFCP_DIAG_H
@@ -56,11 +56,11 @@ struct zfcp_diag_adapter {
unsigned long max_age;
- struct {
+ struct zfcp_diag_adapter_port_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_port data;
} port_data;
- struct {
+ struct zfcp_diag_adapter_config_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_config data;
} config_data;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3d0bc000f500..db320dab1fee 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_MAX_ERPS 3
@@ -768,10 +769,14 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
return ZFCP_ERP_FAILED;
+ return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
zfcp_erp_enqueue_ptp_port(adapter);
-
- return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -800,6 +805,59 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
return ZFCP_ERP_SUCCEEDED;
}
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter_config_data *const config_data =
+ &adapter->diagnostics->config_data;
+ struct zfcp_diag_adapter_port_data *const port_data =
+ &adapter->diagnostics->port_data;
+ unsigned long flags;
+ int rc;
+
+ rc = zfcp_scsi_adapter_register(adapter);
+ if (rc == -EEXIST)
+ return ZFCP_ERP_SUCCEEDED;
+ else if (rc)
+ return ZFCP_ERP_FAILED;
+
+ /*
+	 * We allocated the shost for the first time. Until now it was NULL,
+	 * so all updates were deferred in the xconf- and xport-data
+	 * handlers. We need to make up for that now and apply all the
+	 * updates that would otherwise have been done already.
+ *
+ * We can be sure that xconf- and xport-data succeeded, because
+ * otherwise this function is not called. But they might have been
+ * incomplete.
+ */
+
+ spin_lock_irqsave(&config_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+ !!config_data->header.incomplete);
+ spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ spin_lock_irqsave(&port_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+ spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+ }
+
+ /*
+ * There is a remote possibility that the 'Exchange Port Data' request
+ * reports a different connectivity status than 'Exchange Config Data'.
+ * But any change to the connectivity status of the local optic that
+ * happens after the initial xconf request is expected to be reported
+ * to us, as soon as we post Status Read Buffers to the FCP channel
+ * firmware after this function. So any resulting inconsistency will
+ * only be momentary.
+ */
+ if (config_data->header.incomplete)
+ zfcp_fsf_fc_host_link_down(adapter);
+
+ return ZFCP_ERP_SUCCEEDED;
+}
+
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
struct zfcp_erp_action *act)
{
@@ -809,6 +867,12 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
+ if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+ ZFCP_ERP_FAILED)
+ return ZFCP_ERP_FAILED;
+
+ zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
@@ -1636,6 +1700,13 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+	 * If `scsi_host` is missing, xconfig/xport data has never completed,
+	 * so we cannot access it; but then there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1673,6 +1744,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+	 * If `scsi_host` is missing, xconfig/xport data has never completed,
+	 * so we cannot access it; but then there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
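
The cached xconf/xport payloads read in zfcp_erp_adapter_strategy_alloc_shost() above are shared with the FSF completion handlers, hence the access_lock. A condensed sketch of that access pattern, assuming the zfcp_diag_adapter_config_data layout from zfcp_diag.h (the example function is hypothetical):

	static void example_read_cached_config(struct zfcp_adapter *adapter,
					       struct fsf_qtcb_bottom_config *copy,
					       bool *incomplete)
	{
		struct zfcp_diag_adapter_config_data *config_data =
			&adapter->diagnostics->config_data;
		unsigned long flags;

		/* the FSF handlers update the cache concurrently */
		spin_lock_irqsave(&config_data->header.access_lock, flags);
		*copy = config_data->data;
		*incomplete = !!config_data->header.incomplete;
		spin_unlock_irqrestore(&config_data->header.access_lock, flags);
	}
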
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 88294ca0e2ea..3ef5d74331c3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -125,6 +125,7 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -134,6 +135,7 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
@@ -153,6 +155,8 @@ extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -169,6 +173,13 @@ extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom);
/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 111fe3fc32d7..c795f22249d8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -120,21 +120,25 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
-static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost = adapter->scsi_host;
+ adapter->hydra_version = 0;
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+
+	/* if there is no shost yet, we have nothing to zero out */
+ if (shost == NULL)
+ return;
+
fc_host_port_id(shost) = 0;
fc_host_fabric_name(shost) = 0;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
-
- adapter->peer_wwpn = 0;
- adapter->peer_wwnn = 0;
- adapter->peer_d_id = 0;
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
@@ -479,7 +483,7 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -509,64 +513,36 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
- struct Scsi_Host *shost = adapter->scsi_host;
- struct fc_els_flogi *nsp, *plogi;
+ struct fc_els_flogi *plogi;
/* adjust pointers for missing command code */
- nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
- - sizeof(u32));
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
- "IBM");
- fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
- fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- zfcp_scsi_set_prot(adapter);
-
/* no error return above here, otherwise must fix call chains */
/* do not evaluate invalid fields */
if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
return 0;
- fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) =
- zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
adapter->hydra_version = bottom->adapter_type;
- snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
- bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
- fc_host_port_type(shost) = FC_PORTTYPE_PTP;
- fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
- fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
- if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
- else
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
- fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
- fc_host_fabric_name(shost) = 0;
- fallthrough;
default:
- fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -584,13 +560,10 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -606,6 +579,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_config_data(adapter, bottom, false);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -630,6 +604,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_config_data(adapter, bottom, true);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
break;
@@ -638,16 +614,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
return;
}
- if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
adapter->hardware_version = bottom->hardware_version;
- snprintf(fc_host_hardware_version(shost),
- FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->hardware_version);
- memcpy(fc_host_serial_number(shost), bottom->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 17));
- EBCASC(fc_host_serial_number(shost),
- min(FC_SERIAL_NUMBER_SIZE, 17));
- }
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
@@ -761,19 +729,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) =
- zfcp_fsf_convert_portspeed(bottom->supported_speed);
- memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
- FC_FC4_LIST_SIZE);
- memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
- FC_FC4_LIST_SIZE);
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
adapter->fc_security_algorithms =
bottom->fc_security_algorithms;
@@ -800,6 +759,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -808,6 +768,8 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 26702b56a7ab..3a7f3374d10a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -342,6 +342,18 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, 0);
}
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ shost->sg_tablesize = qdio->max_sbale_per_req;
+ shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
@@ -420,10 +432,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
- if (adapter->scsi_host) {
- adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
- adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
- }
+ zfcp_qdio_shost_update(adapter, qdio);
return 0;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 13d873f806e4..d58bf79892f2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@ static struct scsi_host_template zfcp_scsi_host_template = {
};
/**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ * SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer the corresponding FC host object for
+ * the referenced transport class is also implicitly allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0 - Allocation and registration was successful
+ * * -EEXIST - SCSI and FC host already existed; nothing was done and
+ *             nothing was changed
+ * * -EIO - Allocation or registration failed
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
- return 0;
+ return -EEXIST;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
- if (!adapter->scsi_host) {
- dev_err(&adapter->ccw_device->dev,
- "Registering the FCP device with the "
- "SCSI stack failed\n");
- return -EIO;
- }
+ if (!adapter->scsi_host)
+ goto err_out;
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+ /* make all basic properties known at registration time */
+ zfcp_qdio_shost_update(adapter, adapter->qdio);
+ zfcp_scsi_set_prot(adapter);
+
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
- return -EIO;
+ goto err_out;
}
return 0;
+err_out:
+ adapter->scsi_host = NULL;
+ dev_err(&adapter->ccw_device->dev,
+ "Registering the FCP device with the SCSI stack failed\n");
+ return -EIO;
}
/**
@@ -841,6 +863,95 @@ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
set_host_byte(scmd, DID_SOFT_ERROR);
}
+void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+ const struct fc_els_flogi *nsp, *plogi;
+
+ if (shost == NULL)
+ return;
+
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ }
+
+ /* adjust pointers for missing command code */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
+
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
+ fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+ fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ zfcp_scsi_set_prot(adapter);
+
+ /* do not evaluate invalid fields */
+ if (bottom_incomplete)
+ return;
+
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
+
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
+ break;
+ case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ fallthrough;
+ default:
+ fc_host_fabric_name(shost) = 0;
+ break;
+ }
+}
+
+void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
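
A condensed sketch of how the two update helpers above are meant to be driven, matching the call sites this patch adds in zfcp_fsf.c (the wrapper itself is hypothetical): every xconf completion pushes its payload through the helper, which silently no-ops while adapter->scsi_host is still NULL.

	static void example_xconf_complete(struct zfcp_adapter *adapter,
					   const struct fsf_qtcb_bottom_config *bottom,
					   bool incomplete)
	{
		/* safe at any time: the helper checks adapter->scsi_host itself */
		zfcp_scsi_shost_update_config_data(adapter, bottom, incomplete);

		if (incomplete)
			zfcp_fsf_fc_host_link_down(adapter); /* also NULL-shost safe */
	}
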
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 7ec30ded0169..8d9662e8b717 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -216,20 +216,32 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int retval = 0;
if (!adapter)
return -ENODEV;
/*
+ * If `scsi_host` is missing, we can't schedule `scan_work`, as it
+ * makes use of the corresponding fc_host object. But this state is
+	 * only possible if xconfig/xport data has never completed, in
+	 * which case we could not successfully scan for ports anyway.
+ */
+ if (adapter->scsi_host == NULL) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
	 * The user's wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither
* a random delay nor a rate limit is applied here.
*/
queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
flush_delayed_work(&adapter->scan_work);
+out:
zfcp_ccw_adapter_put(adapter);
-
- return (ssize_t) count;
+ return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);