author    Roland Dreier <roland@purestorage.com>      2011-08-16 20:40:01 +0400
committer Nicholas Bellinger <nab@linux-iscsi.org>    2011-08-22 23:28:36 +0400
commit    28638887f351d11867562322b7abaa014dd5528a (patch)
tree      6540c800b66ef3045e942125e57b756053fc84b5 /drivers/target/target_core_device.c
parent    e63a8e1933a2218cf801e46dd01bd8cca4a555ec (diff)
target: Convert acl_node_lock to be IRQ-disabling
With qla2xxx, acl_node_lock is taken inside qla2xxx's hardware_lock,
which is taken in hardirq context. This means acl_node_lock must become
an IRQ-disabling lock; in particular this fixes lockdep warnings along
the lines of
======================================================
[ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ]
(&(&se_tpg->acl_node_lock)->rlock){+.....}, at: [<ffffffffa026f872>] transport_deregister_session+0x92/0x140 [target_core_mod]
and this task is already holding:
(&(&ha->hardware_lock)->rlock){-.-...}, at: [<ffffffffa017c5e7>] qla_tgt_stop_phase1+0x57/0x2c0 [qla2xxx]
which would create a new lock dependency:
(&(&ha->hardware_lock)->rlock){-.-...} -> (&(&se_tpg->acl_node_lock)->rlock){+.....}
but this new dependency connects a HARDIRQ-irq-safe lock:
(&(&ha->hardware_lock)->rlock){-.-...}
to a HARDIRQ-irq-unsafe lock:
(&(&se_tpg->acl_node_lock)->rlock){+.....}
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
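
For context, the lockdep report above boils down to a HARDIRQ-safe -> HARDIRQ-unsafe ordering problem: a lock taken in hardirq context (qla2xxx's hardware_lock) nests a lock that was only BH-disabling (acl_node_lock) inside it. Below is a minimal sketch of that pattern, not kernel code from this patch; the names hw_lock, node_lock, hw_irq_handler and process_path are hypothetical stand-ins.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(hw_lock);   /* taken in hardirq context, like qla2xxx's hardware_lock */
static DEFINE_SPINLOCK(node_lock); /* stands in for se_tpg->acl_node_lock */

/* Hardirq path: hw_lock is HARDIRQ-safe by construction, but it now
 * nests node_lock inside it, so node_lock is taken with irqs off. */
static irqreturn_t hw_irq_handler(int irq, void *dev_id)
{
	spin_lock(&hw_lock);    /* irqs are already disabled in hardirq context */
	spin_lock(&node_lock);
	/* ... */
	spin_unlock(&node_lock);
	spin_unlock(&hw_lock);
	return IRQ_HANDLED;
}

/* Process-context path: with only softirqs disabled (spin_lock_bh), a
 * hardirq can still fire on this CPU while node_lock is held; the handler
 * then takes hw_lock and spins forever on node_lock, deadlocking the CPU.
 * Disabling hardirqs (spin_lock_irq) closes that window, which is what
 * this patch does for acl_node_lock. */
static void process_path(void)
{
	spin_lock_irq(&node_lock);   /* was spin_lock_bh() before the conversion */
	/* ... */
	spin_unlock_irq(&node_lock);
}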
Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--  drivers/target/target_core_device.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 4b5237f500a5..ca6e4a4df134 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 	struct se_dev_entry *deve;
 	u32 i;
 
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 
 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 		}
 		spin_unlock_irq(&nacl->device_list_lock);
 
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 }
 
 static struct se_port *core_alloc_port(struct se_device *dev)
@@ -1372,17 +1372,17 @@ struct se_lun *core_dev_add_lun(
 	 */
 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 		struct se_node_acl *acl;
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 			if (acl->dynamic_node_acl &&
 			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
 			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
-				spin_unlock_bh(&tpg->acl_node_lock);
+				spin_unlock_irq(&tpg->acl_node_lock);
 				core_tpg_add_node_to_devs(acl, tpg);
-				spin_lock_bh(&tpg->acl_node_lock);
+				spin_lock_irq(&tpg->acl_node_lock);
 			}
 		}
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 	}
 
 	return lun_p;
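
A note on the primitive chosen in these hunks: spin_lock_irq() unconditionally re-enables interrupts on unlock, so it is only appropriate where the caller is known to run in process context with interrupts enabled, as these TPG configuration paths are. Code that might be called with interrupts already disabled would need the save/restore variant instead. A small sketch of the difference, with hypothetical function names and example_lock standing in for acl_node_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);  /* hypothetical, stands in for acl_node_lock */

/* Process-context only: interrupts are known to be enabled on entry, so a
 * plain irq-disabling pair is enough (the pattern this patch uses). */
static void process_context_path(void)
{
	spin_lock_irq(&example_lock);
	/* ... touch data shared with hardirq context ... */
	spin_unlock_irq(&example_lock);  /* unconditionally re-enables irqs */
}

/* Context-agnostic: save and restore the previous irq state, so this is
 * safe even if the caller already had interrupts disabled. */
static void any_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&example_lock, flags);
}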