Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/agp/intel-agp.c                 |    1
-rw-r--r--  drivers/char/agp/intel-agp.h                 |    1
-rw-r--r--  drivers/char/agp/intel-gtt.c                 |   13
-rw-r--r--  drivers/char/apm-emulation.c                 |    3
-rw-r--r--  drivers/char/ds1302.c                        |    1
-rw-r--r--  drivers/char/efirtc.c                        |    1
-rw-r--r--  drivers/char/genrtc.c                        |    1
-rw-r--r--  drivers/char/hpet.c                          |    5
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c         |   13
-rw-r--r--  drivers/char/hw_random/omap-rng.c            |    2
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c          |    5
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c             |    1
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c              |    4
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c          |  243
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c             |   73
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c            |   25
-rw-r--r--  drivers/char/lp.c                            |    6
-rw-r--r--  drivers/char/mbcs.c                          |    1
-rw-r--r--  drivers/char/mspec.c                         |    1
-rw-r--r--  drivers/char/mwave/3780i.c                   |    1
-rw-r--r--  drivers/char/nvram.c                         |    3
-rw-r--r--  drivers/char/nwflash.c                       |    1
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c            |    1
-rw-r--r--  drivers/char/random.c                        |   11
-rw-r--r--  drivers/char/rtc.c                           |    5
-rw-r--r--  drivers/char/sonypi.c                        |    1
-rw-r--r--  drivers/char/tile-srom.c                     |   12
-rw-r--r--  drivers/char/tpm/Kconfig                     |    1
-rw-r--r--  drivers/char/tpm/tpm.c                       |    3
-rw-r--r--  drivers/char/tpm/tpm.h                       |    2
-rw-r--r--  drivers/char/tpm/tpm_tis.c                   |   27
-rw-r--r--  drivers/char/viotape.c                       | 1041
-rw-r--r--  drivers/char/virtio_console.c                |    8
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c   |    1
34 files changed, 227 insertions(+), 1291 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b427711be4be..962e75dc4781 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -850,6 +850,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
}
+ ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
ID(PCI_DEVICE_ID_INTEL_82443LX_0),
ID(PCI_DEVICE_ID_INTEL_82443BX_0),
ID(PCI_DEVICE_ID_INTEL_82443GX_0),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5da67f165afa..7ea18a5fe71c 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -234,6 +234,7 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c92424ca1a55..7f025fb620de 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -76,7 +76,6 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
- dma_addr_t scratch_page_dma;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void)
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
- intel_private.scratch_page_dma = dma_addr;
+ intel_private.base.scratch_page_dma = dma_addr;
} else
- intel_private.scratch_page_dma = page_to_phys(page);
+ intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@@ -681,6 +680,7 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
+ intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
@@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
- intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@@ -1190,7 +1190,6 @@ static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- extern int intel_iommu_gfx_mapped;
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
@@ -1459,6 +1458,8 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index f4837a893dfa..46118f845948 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -31,7 +31,6 @@
#include <linux/kthread.h>
#include <linux/delay.h>
-#include <asm/system.h>
/*
* The apm_bios device is one of the misc char devices.
@@ -302,7 +301,7 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg)
* anything critical, chill a bit on each iteration.
*/
while (wait_event_freezable(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE))
+ as->suspend_state != SUSPEND_ACKED))
msleep(10);
break;
case SUSPEND_ACKTO:
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index ed8303f9890c..7d34b203718a 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <asm/system.h>
#include <asm/rtc.h>
#if defined(CONFIG_M32R)
#include <asm/m32r.h>
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 53c524e7b829..a082d00b0f11 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -37,7 +37,6 @@
#include <linux/efi.h>
#include <linux/uaccess.h>
-#include <asm/system.h>
#define EFI_RTC_VERSION "0.4"
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index f773a9dd14f3..21cb980f1157 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -56,7 +56,6 @@
#include <linux/workqueue.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/rtc.h>
/*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0833896cf6f2..dfd7876f127c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -36,7 +36,6 @@
#include <linux/io.h>
#include <asm/current.h>
-#include <asm/system.h>
#include <asm/irq.h>
#include <asm/div64.h>
@@ -907,8 +906,8 @@ int hpet_alloc(struct hpet_data *hdp)
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
- printk("\n");
+ printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
remainder = do_div(temp, 1000000);
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 3d3c1e6703b4..96de0249e595 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -107,17 +107,6 @@ static struct amba_driver nmk_rng_driver = {
.id_table = nmk_rng_ids,
};
-static int __init nmk_rng_init(void)
-{
- return amba_driver_register(&nmk_rng_driver);
-}
-
-static void __devexit nmk_rng_exit(void)
-{
- amba_driver_unregister(&nmk_rng_driver);
-}
-
-module_init(nmk_rng_init);
-module_exit(nmk_rng_exit);
+module_amba_driver(nmk_rng_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b757fac3cd1f..a07a5caa599c 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -26,6 +26,8 @@
#include <asm/io.h>
+#include <plat/cpu.h>
+
#define RNG_OUT_REG 0x00 /* Output register */
#define RNG_STAT_REG 0x04 /* Status register
[0] = STAT_BUSY */
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index 0bc0cb70210b..de473ef3882b 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -115,10 +115,7 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
if (!rngdev)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r),
- dev_name(&dev->dev)))
- return -EBUSY;
- rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r));
+ rngdev->base = devm_request_and_ioremap(&dev->dev, r);
if (!rngdev->base)
return -EBUSY;
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2aa3977aae5e..9eb360ff8cab 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
-#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae099..e53fc24c6af3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 1000000
-#define OBF_RETRY_TIMEOUT 1000000
+#define IBF_RETRY_TIMEOUT 5000000
+#define OBF_RETRY_TIMEOUT 5000000
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 58c0e6387cf7..2c29942b1326 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/errno.h>
-#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -46,6 +45,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
#define PFX "IPMI message handler: "
@@ -53,6 +53,8 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
static int initialized;
@@ -355,12 +357,15 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages that were delayed for some reason (out of memory,
- * for instance), will go in here to be processed later in a
- * periodic timer interrupt.
+ * Messages queued for delivery. If delivery fails (out of memory
+ * for instance), They will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet is for handling received
+ * messages directly from the handler.
*/
spinlock_t waiting_msgs_lock;
struct list_head waiting_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
/*
* The list of command receivers that are registered for commands
@@ -493,6 +498,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
+ tasklet_kill(&intf->recv_tasklet);
+
free_smi_msg_list(&intf->waiting_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -2786,12 +2793,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
-void ipmi_poll_interface(ipmi_user_t user)
+static void ipmi_poll(ipmi_smi_t intf)
{
- ipmi_smi_t intf = user->intf;
-
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+ ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
@@ -2860,6 +2872,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#endif
spin_lock_init(&intf->waiting_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_msgs);
+ tasklet_init(&intf->recv_tasklet,
+ smi_recv_tasklet,
+ (unsigned long) intf);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
@@ -3622,11 +3638,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
}
/*
- * Handle a new message. Return 1 if the message should be requeued,
+ * Handle a received message. Return 1 if the message should be requeued,
* 0 if the message should be freed, or -1 if the message should not
* be freed or requeued.
*/
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
int requeue;
@@ -3784,12 +3800,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
return requeue;
}
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_msgs)) {
+ smi_msg = list_entry(intf->waiting_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ if (rv == 0) {
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ } else if (rv < 0) {
+ /* Fatal error on the message, del but don't free. */
+ } else {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
+ list_add(&smi_msg->link, &intf->waiting_msgs);
+ break;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ /*
+ * If the pretimout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ ipmi_user_t user;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ rcu_read_unlock();
+ }
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+ handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int rv;
int run_to_completion;
@@ -3843,31 +3919,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
run_to_completion = intf->run_to_completion;
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (!list_empty(&intf->waiting_msgs)) {
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- goto out;
- }
+ list_add_tail(&msg->link, &intf->waiting_msgs);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- rv = handle_new_recv_msg(intf, msg);
- if (rv > 0) {
- /*
- * Could not handle the message now, just add it to a
- * list to handle later.
- */
- run_to_completion = intf->run_to_completion;
- if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- } else if (rv == 0) {
- ipmi_free_smi_msg(msg);
- }
-
+ tasklet_schedule(&intf->recv_tasklet);
out:
return;
}
@@ -3875,16 +3931,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
- ipmi_user_t user;
-
- rcu_read_lock();
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (!user->handler->ipmi_watchdog_pretimeout)
- continue;
-
- user->handler->ipmi_watchdog_pretimeout(user->handler_data);
- }
- rcu_read_unlock();
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -3998,28 +4046,12 @@ static void ipmi_timeout_handler(long timeout_period)
ipmi_smi_t intf;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
- struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i;
rcu_read_lock();
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* See if any waiting messages need to be processed. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2,
- &intf->waiting_msgs, link) {
- if (!handle_new_recv_msg(intf, smi_msg)) {
- list_del(&smi_msg->link);
- ipmi_free_smi_msg(smi_msg);
- } else {
- /*
- * To preserve message order, quit if we
- * can't handle a message.
- */
- break;
- }
- }
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ tasklet_schedule(&intf->recv_tasklet);
/*
* Go through the seq table and find any messages that
@@ -4173,12 +4205,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
#ifdef CONFIG_IPMI_PANIC_EVENT
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4217,8 +4285,6 @@ static void send_panic_events(char *str)
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
- struct ipmi_smi_msg smi_msg;
- struct ipmi_recv_msg recv_msg;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4246,9 +4312,6 @@ static void send_panic_events(char *str)
data[7] = str[2];
}
- smi_msg.done = dummy_smi_done_handler;
- recv_msg.done = dummy_recv_done_handler;
-
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (!intf->handlers)
@@ -4258,18 +4321,7 @@ static void send_panic_events(char *str)
intf->run_to_completion = 1;
/* Send the event announcing the panic. */
intf->handlers->set_run_to_completion(intf->send_info, 1);
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4317,18 +4369,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -4337,18 +4378,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -4405,18 +4435,7 @@ static void send_panic_events(char *str)
strncpy(data+5, p, 11);
p += size;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
#endif /* CONFIG_IPMI_PANIC_STRING */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 50fcf9c04569..1e638fff40ea 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -41,7 +41,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <asm/system.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
@@ -171,7 +170,6 @@ struct smi_info {
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
- spinlock_t msg_lock;
struct list_head xmit_msgs;
struct list_head hp_xmit_msgs;
struct ipmi_smi_msg *curr_msg;
@@ -320,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
- /* Deliver the message to the upper layer with the lock
- released. */
-
- if (smi_info->run_to_completion) {
- ipmi_smi_msg_received(smi_info->intf, msg);
- } else {
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
- }
+ /* Deliver the message to the upper layer. */
+ ipmi_smi_msg_received(smi_info->intf, msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -358,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
struct timeval t;
#endif
- /*
- * No need to save flags, we aleady have interrupts off and we
- * already hold the SMI lock.
- */
- if (!smi_info->run_to_completion)
- spin_lock(&(smi_info->msg_lock));
-
/* Pick the high priority queue first. */
if (!list_empty(&(smi_info->hp_xmit_msgs))) {
entry = smi_info->hp_xmit_msgs.next;
@@ -402,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
- if (!smi_info->run_to_completion)
- spin_unlock(&(smi_info->msg_lock));
-
return rv;
}
@@ -481,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
- spin_unlock(&(smi_info->si_lock));
ipmi_smi_watchdog_pretimeout(smi_info->intf);
- spin_lock(&(smi_info->si_lock));
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -889,19 +867,6 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
- /*
- * last_timeout_jiffies is updated here to avoid
- * smi_timeout() handler passing very large time_diff
- * value to smi_event_handler() that causes
- * the send command to abort.
- */
- smi_info->last_timeout_jiffies = jiffies;
-
- mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
- if (smi_info->thread)
- wake_up_process(smi_info->thread);
-
if (smi_info->run_to_completion) {
/*
* If we are running to completion, then throw it in
@@ -924,16 +889,29 @@ static void sender(void *send_info,
return;
}
- spin_lock_irqsave(&smi_info->msg_lock, flags);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
if (priority > 0)
list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
else
list_add_tail(&msg->link, &smi_info->xmit_msgs);
- spin_unlock_irqrestore(&smi_info->msg_lock, flags);
- spin_lock_irqsave(&smi_info->si_lock, flags);
- if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+ /*
+ * last_timeout_jiffies is updated here to avoid
+ * smi_timeout() handler passing very large time_diff
+ * value to smi_event_handler() that causes
+ * the send command to abort.
+ */
+ smi_info->last_timeout_jiffies = jiffies;
+
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
start_next_msg(smi_info);
+ smi_event_handler(smi_info, 0);
+ }
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
@@ -1034,16 +1012,19 @@ static int ipmi_thread(void *data)
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
- unsigned long flags;
+ unsigned long flags = 0;
+ int run_to_completion = smi_info->run_to_completion;
/*
* Make sure there is some delay in the poll loop so we can
* drive time forward and timeout things.
*/
udelay(10);
- spin_lock_irqsave(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_lock_irqsave(&smi_info->si_lock, flags);
smi_event_handler(smi_info, 10);
- spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
@@ -1680,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
{
struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info) {
+ if (info)
spin_lock_init(&info->si_lock);
- spin_lock_init(&info->msg_lock);
- }
return info;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 34767a6d7f42..7ed356e52035 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,7 +153,7 @@
#endif
static DEFINE_MUTEX(ipmi_watchdog_mutex);
-static int nowayout = WATCHDOG_NOWAYOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
static ipmi_user_t watchdog_user;
static int watchdog_ifnum;
@@ -320,7 +320,7 @@ module_param(start_now, int, 0444);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
"soon as the driver is loaded.");
-module_param(nowayout, int, 0644);
+module_param(nowayout, bool, 0644);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=CONFIG_WATCHDOG_NOWAYOUT)");
@@ -520,6 +520,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -528,8 +529,8 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_smi_msg,
&panic_halt_heartbeat_recv_msg,
1);
- if (!rv)
- atomic_add(2, &panic_done_count);
+ if (rv)
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -553,16 +554,18 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
+ atomic_add(2, &panic_done_count);
rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
- if (!rv) {
- atomic_add(2, &panic_done_count);
- if (send_heartbeat_now)
- panic_halt_ipmi_heartbeat();
- } else
+ if (rv) {
+ atomic_sub(2, &panic_done_count);
printk(KERN_WARNING PFX
"Unable to extend the watchdog timeout.");
+ } else {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
}
@@ -1164,7 +1167,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
if (code == SYS_POWER_OFF || code == SYS_HALT) {
/* Disable the WDT if we are shutting down. */
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/* Set a long timer to let the reboot happens, but
reboot if it hangs, but only if the watchdog
@@ -1172,7 +1175,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
}
}
return NOTIFY_OK;
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index f43485607063..a741e418b456 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -135,7 +135,6 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
/* if you have more than 8 printers, remember to increase LP_NO */
#define LP_NO 8
@@ -706,16 +705,13 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
{
unsigned int minor;
struct timeval par_timeout;
- struct compat_timeval __user *tc;
int ret;
minor = iminor(file->f_path.dentry->d_inode);
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
- tc = compat_ptr(arg);
- if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
- get_user(par_timeout.tv_usec, &tc->tv_usec)) {
+ if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
ret = -EFAULT;
break;
}
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 1aeaaba680d2..47ff7e470d87 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 5c0d96a820fa..8b78750f1efe 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -44,7 +44,6 @@
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 492dbfb2efd6..881c9e595939 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -56,7 +56,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/irq.h>
#include "smapi.h"
#include "mwavedd.h"
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index da3cfee782dc..9df78e2cc45d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -94,7 +94,7 @@
/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
* rtc_lock held. Due to the index-port/data-port design of the RTC, we
* don't want two different things trying to get to it at once. (e.g. the
- * periodic 11 min sync from time.c vs. this driver.)
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
*/
#include <linux/types.h>
@@ -111,7 +111,6 @@
#include <linux/uaccess.h>
#include <linux/mutex.h>
-#include <asm/system.h>
static DEFINE_MUTEX(nvram_mutex);
static DEFINE_SPINLOCK(nvram_state_lock);
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index bf586ae1ee83..d45c3345b4af 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -32,7 +32,6 @@
#include <asm/io.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
/*****************************************************************************/
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index f6453df4921c..0a484b4a1b02 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -60,7 +60,6 @@
#include <linux/ioctl.h>
#include <linux/synclink.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 54ca8b23cde3..4ec04a754733 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1260,10 +1260,15 @@ static int proc_do_uuid(ctl_table *table, int write,
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
- uuid[8] = 0;
- }
- if (uuid[8] == 0)
generate_random_uuid(uuid);
+ } else {
+ static DEFINE_SPINLOCK(bootid_spinlock);
+
+ spin_lock(&bootid_spinlock);
+ if (!uuid[8])
+ generate_random_uuid(uuid);
+ spin_unlock(&bootid_spinlock);
+ }
sprintf(buf, "%pU", uuid);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index ccd124ab7ca7..af9437488b6c 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -57,8 +57,8 @@
* Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
* interrupts disabled. Due to the index-port/data-port (0x70/0x71)
* design of the RTC, we don't want two different things trying to
- * get to it at once. (e.g. the periodic 11 min sync from time.c vs.
- * this driver.)
+ * get to it at once. (e.g. the periodic 11 min sync from
+ * kernel/time/ntp.c vs. this driver.)
*/
#include <linux/interrupt.h>
@@ -83,7 +83,6 @@
#include <linux/ratelimit.h>
#include <asm/current.h>
-#include <asm/system.h>
#ifdef CONFIG_X86
#include <asm/hpet.h>
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 1ee8ce7d2762..45713f0e7d61 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -54,7 +54,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <linux/sonypi.h>
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 4dc019408fac..3b22a606f79d 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -194,17 +194,17 @@ static ssize_t srom_read(struct file *filp, char __user *buf,
hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
*f_pos, bytes_this_pass);
- if (hv_retval > 0) {
- if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
- retval = -EFAULT;
- break;
- }
- } else if (hv_retval <= 0) {
+ if (hv_retval <= 0) {
if (retval == 0)
retval = hv_retval;
break;
}
+ if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+
retval += hv_retval;
*f_pos += hv_retval;
buf += hv_retval;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 7fc75e47e6d0..a048199ce866 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,7 +5,6 @@
menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
- depends on EXPERIMENTAL
select SECURITYFS
---help---
If you have a TPM security chip in your system, which
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 32362cf35b8d..ad7c7320dd1b 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1221,12 +1221,13 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
+ ssize_t orig_ret_size = ret_size;
if (size < ret_size)
ret_size = size;
mutex_lock(&chip->buffer_mutex);
rc = copy_to_user(buf, chip->data_buffer, ret_size);
- memset(chip->data_buffer, 0, ret_size);
+ memset(chip->data_buffer, 0, orig_ret_size);
if (rc)
ret_size = -EFAULT;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 010547138281..b1c5280ac159 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -99,6 +99,8 @@ struct tpm_vendor_specific {
wait_queue_head_t int_queue;
};
+#define TPM_VID_INTEL 0x8086
+
struct tpm_chip {
struct device *dev; /* Device stuff */
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index a1748621111b..d2a70cae76df 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -76,7 +76,7 @@ enum tis_defaults {
#define TPM_RID(l) (0x0F04 | ((l) << 12))
static LIST_HEAD(tis_chips);
-static DEFINE_SPINLOCK(tis_lock);
+static DEFINE_MUTEX(tis_lock);
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
@@ -367,7 +367,12 @@ static int probe_itpm(struct tpm_chip *chip)
0x00, 0x00, 0x00, 0xf1
};
size_t len = sizeof(cmd_getticks);
- int rem_itpm = itpm;
+ bool rem_itpm = itpm;
+ u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
+
+ /* probe only iTPMS */
+ if (vendor != TPM_VID_INTEL)
+ return 0;
itpm = 0;
@@ -390,9 +395,6 @@ static int probe_itpm(struct tpm_chip *chip)
out:
itpm = rem_itpm;
tpm_tis_ready(chip);
- /* some TPMs need a break here otherwise they will not work
- * correctly on the immediately subsequent command */
- msleep(chip->vendor.timeout_b);
release_locality(chip, chip->vendor.locality, 0);
return rc;
@@ -508,7 +510,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
resource_size_t len, unsigned int irq)
{
u32 vendor, intfcaps, intmask;
- int rc, i, irq_s, irq_e;
+ int rc, i, irq_s, irq_e, probe;
struct tpm_chip *chip;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -538,11 +540,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
if (!itpm) {
- itpm = probe_itpm(chip);
- if (itpm < 0) {
+ probe = probe_itpm(chip);
+ if (probe < 0) {
rc = -ENODEV;
goto out_err;
}
+ itpm = (probe == 0) ? 0 : 1;
}
if (itpm)
@@ -689,9 +692,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
}
INIT_LIST_HEAD(&chip->vendor.list);
- spin_lock(&tis_lock);
+ mutex_lock(&tis_lock);
list_add(&chip->vendor.list, &tis_chips);
- spin_unlock(&tis_lock);
+ mutex_unlock(&tis_lock);
return 0;
@@ -855,7 +858,7 @@ static void __exit cleanup_tis(void)
{
struct tpm_vendor_specific *i, *j;
struct tpm_chip *chip;
- spin_lock(&tis_lock);
+ mutex_lock(&tis_lock);
list_for_each_entry_safe(i, j, &tis_chips, list) {
chip = to_tpm_chip(i);
tpm_remove_hardware(chip->dev);
@@ -871,7 +874,7 @@ static void __exit cleanup_tis(void)
iounmap(i->iobase);
list_del(&i->list);
}
- spin_unlock(&tis_lock);
+ mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
if (!force) {
pnp_unregister_driver(&tis_pnp_driver);
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
deleted file mode 100644
index 8b34c65511eb..000000000000
--- a/drivers/char/viotape.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/* -*- linux-c -*-
- * drivers/char/viotape.c
- *
- * iSeries Virtual Tape
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- * Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) anyu later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to tape drives owned and managed by an OS/400
- * partition running on the same box as this Linux partition.
- *
- * All tape operations are performed by sending messages back and forth to
- * the OS/400 partition. The format of the messages is defined in
- * iseries/vio.h
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtio.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/major.h>
-#include <linux/completion.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/ioctls.h>
-#include <asm/firmware.h>
-#include <asm/vio.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_call_event.h>
-#include <asm/iseries/hv_lp_config.h>
-
-#define VIOTAPE_VERSION "1.2"
-#define VIOTAPE_MAXREQ 1
-
-#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
-#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "
-
-static DEFINE_MUTEX(proc_viotape_mutex);
-static int viotape_numdev;
-
-/*
- * The minor number follows the conventions of the SCSI tape drives. The
- * rewind and mode are encoded in the minor #. We use this struct to break
- * them out
- */
-struct viot_devinfo_struct {
- int devno;
- int mode;
- int rewind;
-};
-
-#define VIOTAPOP_RESET 0
-#define VIOTAPOP_FSF 1
-#define VIOTAPOP_BSF 2
-#define VIOTAPOP_FSR 3
-#define VIOTAPOP_BSR 4
-#define VIOTAPOP_WEOF 5
-#define VIOTAPOP_REW 6
-#define VIOTAPOP_NOP 7
-#define VIOTAPOP_EOM 8
-#define VIOTAPOP_ERASE 9
-#define VIOTAPOP_SETBLK 10
-#define VIOTAPOP_SETDENSITY 11
-#define VIOTAPOP_SETPOS 12
-#define VIOTAPOP_GETPOS 13
-#define VIOTAPOP_SETPART 14
-#define VIOTAPOP_UNLOAD 15
-
-enum viotaperc {
- viotape_InvalidRange = 0x0601,
- viotape_InvalidToken = 0x0602,
- viotape_DMAError = 0x0603,
- viotape_UseError = 0x0604,
- viotape_ReleaseError = 0x0605,
- viotape_InvalidTape = 0x0606,
- viotape_InvalidOp = 0x0607,
- viotape_TapeErr = 0x0608,
-
- viotape_AllocTimedOut = 0x0640,
- viotape_BOTEnc = 0x0641,
- viotape_BlankTape = 0x0642,
- viotape_BufferEmpty = 0x0643,
- viotape_CleanCartFound = 0x0644,
- viotape_CmdNotAllowed = 0x0645,
- viotape_CmdNotSupported = 0x0646,
- viotape_DataCheck = 0x0647,
- viotape_DecompressErr = 0x0648,
- viotape_DeviceTimeout = 0x0649,
- viotape_DeviceUnavail = 0x064a,
- viotape_DeviceBusy = 0x064b,
- viotape_EndOfMedia = 0x064c,
- viotape_EndOfTape = 0x064d,
- viotape_EquipCheck = 0x064e,
- viotape_InsufficientRs = 0x064f,
- viotape_InvalidLogBlk = 0x0650,
- viotape_LengthError = 0x0651,
- viotape_LibDoorOpen = 0x0652,
- viotape_LoadFailure = 0x0653,
- viotape_NotCapable = 0x0654,
- viotape_NotOperational = 0x0655,
- viotape_NotReady = 0x0656,
- viotape_OpCancelled = 0x0657,
- viotape_PhyLinkErr = 0x0658,
- viotape_RdyNotBOT = 0x0659,
- viotape_TapeMark = 0x065a,
- viotape_WriteProt = 0x065b
-};
-
-static const struct vio_error_entry viotape_err_table[] = {
- { viotape_InvalidRange, EIO, "Internal error" },
- { viotape_InvalidToken, EIO, "Internal error" },
- { viotape_DMAError, EIO, "DMA error" },
- { viotape_UseError, EIO, "Internal error" },
- { viotape_ReleaseError, EIO, "Internal error" },
- { viotape_InvalidTape, EIO, "Invalid tape device" },
- { viotape_InvalidOp, EIO, "Invalid operation" },
- { viotape_TapeErr, EIO, "Tape error" },
- { viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
- { viotape_BOTEnc, EIO, "Beginning of tape encountered" },
- { viotape_BlankTape, EIO, "Blank tape" },
- { viotape_BufferEmpty, EIO, "Buffer empty" },
- { viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
- { viotape_CmdNotAllowed, EIO, "Command not allowed" },
- { viotape_CmdNotSupported, EIO, "Command not supported" },
- { viotape_DataCheck, EIO, "Data check" },
- { viotape_DecompressErr, EIO, "Decompression error" },
- { viotape_DeviceTimeout, EBUSY, "Device timeout" },
- { viotape_DeviceUnavail, EIO, "Device unavailable" },
- { viotape_DeviceBusy, EBUSY, "Device busy" },
- { viotape_EndOfMedia, ENOSPC, "End of media" },
- { viotape_EndOfTape, ENOSPC, "End of tape" },
- { viotape_EquipCheck, EIO, "Equipment check" },
- { viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
- { viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
- { viotape_LengthError, EOVERFLOW, "Length error" },
- { viotape_LibDoorOpen, EBUSY, "Door open" },
- { viotape_LoadFailure, ENOMEDIUM, "Load failure" },
- { viotape_NotCapable, EIO, "Not capable" },
- { viotape_NotOperational, EIO, "Not operational" },
- { viotape_NotReady, EIO, "Not ready" },
- { viotape_OpCancelled, EIO, "Operation cancelled" },
- { viotape_PhyLinkErr, EIO, "Physical link error" },
- { viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
- { viotape_TapeMark, EIO, "Tape mark" },
- { viotape_WriteProt, EROFS, "Write protection error" },
- { 0, 0, NULL },
-};
-
-/* Maximum number of tapes we support */
-#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
-#define MAX_PARTITIONS 4
-
-/* defines for current tape state */
-#define VIOT_IDLE 0
-#define VIOT_READING 1
-#define VIOT_WRITING 2
-
-/* Our info on the tapes */
-static struct {
- const char *rsrcname;
- const char *type;
- const char *model;
-} viotape_unitinfo[VIOTAPE_MAX_TAPE];
-
-static struct mtget viomtget[VIOTAPE_MAX_TAPE];
-
-static struct class *tape_class;
-
-static struct device *tape_device[VIOTAPE_MAX_TAPE];
-
-/*
- * maintain the current state of each tape (and partition)
- * so that we know when to write EOF marks.
- */
-static struct {
- unsigned char cur_part;
- unsigned char part_stat_rwi[MAX_PARTITIONS];
-} state[VIOTAPE_MAX_TAPE];
-
-/* We single-thread */
-static struct semaphore reqSem;
-
-/*
- * When we send a request, we use this struct to get the response back
- * from the interrupt handler
- */
-struct op_struct {
- void *buffer;
- dma_addr_t dmaaddr;
- size_t count;
- int rc;
- int non_blocking;
- struct completion com;
- struct device *dev;
- struct op_struct *next;
-};
-
-static spinlock_t op_struct_list_lock;
-static struct op_struct *op_struct_list;
-
-/* forward declaration to resolve interdependence */
-static int chg_state(int index, unsigned char new_state, struct file *file);
-
-/* procfs support */
-static int proc_viotape_show(struct seq_file *m, void *v)
-{
- int i;
-
- seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
- for (i = 0; i < viotape_numdev; i++) {
- seq_printf(m, "viotape device %d is iSeries resource %10.10s"
- "type %4.4s, model %3.3s\n",
- i, viotape_unitinfo[i].rsrcname,
- viotape_unitinfo[i].type,
- viotape_unitinfo[i].model);
- }
- return 0;
-}
-
-static int proc_viotape_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_viotape_show, NULL);
-}
-
-static const struct file_operations proc_viotape_operations = {
- .owner = THIS_MODULE,
- .open = proc_viotape_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/* Decode the device minor number into its parts */
-void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
-{
- devi->devno = iminor(ino) & 0x1F;
- devi->mode = (iminor(ino) & 0x60) >> 5;
- /* if bit is set in the minor, do _not_ rewind automatically */
- devi->rewind = (iminor(ino) & 0x80) == 0;
-}
-
-/* This is called only from the exit and init paths, so no need for locking */
-static void clear_op_struct_pool(void)
-{
- while (op_struct_list) {
- struct op_struct *toFree = op_struct_list;
- op_struct_list = op_struct_list->next;
- kfree(toFree);
- }
-}
-
-/* Likewise, this is only called from the init path */
-static int add_op_structs(int structs)
-{
- int i;
-
- for (i = 0; i < structs; ++i) {
- struct op_struct *new_struct =
- kmalloc(sizeof(*new_struct), GFP_KERNEL);
- if (!new_struct) {
- clear_op_struct_pool();
- return -ENOMEM;
- }
- new_struct->next = op_struct_list;
- op_struct_list = new_struct;
- }
- return 0;
-}
-
-/* Allocate an op structure from our pool */
-static struct op_struct *get_op_struct(void)
-{
- struct op_struct *retval;
- unsigned long flags;
-
- spin_lock_irqsave(&op_struct_list_lock, flags);
- retval = op_struct_list;
- if (retval)
- op_struct_list = retval->next;
- spin_unlock_irqrestore(&op_struct_list_lock, flags);
- if (retval) {
- memset(retval, 0, sizeof(*retval));
- init_completion(&retval->com);
- }
-
- return retval;
-}
-
-/* Return an op structure to our pool */
-static void free_op_struct(struct op_struct *op_struct)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&op_struct_list_lock, flags);
- op_struct->next = op_struct_list;
- op_struct_list = op_struct;
- spin_unlock_irqrestore(&op_struct_list_lock, flags);
-}
-
-/* Map our tape return codes to errno values */
-int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
-{
- const struct vio_error_entry *err;
-
- if (tape_rc == 0)
- return 0;
-
- err = vio_lookup_rc(viotape_err_table, tape_rc);
- printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
- operation, tape_rc, tapeno,
- viotape_unitinfo[tapeno].rsrcname, err->msg);
- return -err->errno;
-}
-
-/* Write */
-static ssize_t viotap_write(struct file *file, const char *buf,
- size_t count, loff_t * ppos)
-{
- HvLpEvent_Rc hvrc;
- unsigned short flags = file->f_flags;
- int noblock = ((flags & O_NONBLOCK) != 0);
- ssize_t ret;
- struct viot_devinfo_struct devi;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /*
- * We need to make sure we can send a request. We use
- * a semaphore to keep track of # requests in use. If
- * we are non-blocking, make sure we don't block on the
- * semaphore
- */
- if (noblock) {
- if (down_trylock(&reqSem)) {
- ret = -EWOULDBLOCK;
- goto free_op;
- }
- } else
- down(&reqSem);
-
- /* Allocate a DMA buffer */
- op->dev = tape_device[devi.devno];
- op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
- GFP_ATOMIC);
-
- if (op->buffer == NULL) {
- printk(VIOTAPE_KERN_WARN
- "error allocating dma buffer for len %ld\n",
- count);
- ret = -EFAULT;
- goto up_sem;
- }
-
- /* Copy the data into the buffer */
- if (copy_from_user(op->buffer, buf, count)) {
- printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
- ret = -EFAULT;
- goto free_dma;
- }
-
- op->non_blocking = noblock;
- init_completion(&op->com);
- op->count = count;
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapewrite,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- ret = -EIO;
- goto free_dma;
- }
-
- if (noblock)
- return count;
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- ret = tape_rc_to_errno(op->rc, "write", devi.devno);
- else {
- chg_state(devi.devno, VIOT_WRITING, file);
- ret = op->count;
- }
-
-free_dma:
- dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
-up_sem:
- up(&reqSem);
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-/* read */
-static ssize_t viotap_read(struct file *file, char *buf, size_t count,
- loff_t *ptr)
-{
- HvLpEvent_Rc hvrc;
- unsigned short flags = file->f_flags;
- struct op_struct *op = get_op_struct();
- int noblock = ((flags & O_NONBLOCK) != 0);
- ssize_t ret;
- struct viot_devinfo_struct devi;
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /*
- * We need to make sure we can send a request. We use
- * a semaphore to keep track of # requests in use. If
- * we are non-blocking, make sure we don't block on the
- * semaphore
- */
- if (noblock) {
- if (down_trylock(&reqSem)) {
- ret = -EWOULDBLOCK;
- goto free_op;
- }
- } else
- down(&reqSem);
-
- chg_state(devi.devno, VIOT_READING, file);
-
- /* Allocate a DMA buffer */
- op->dev = tape_device[devi.devno];
- op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
- GFP_ATOMIC);
- if (op->buffer == NULL) {
- ret = -EFAULT;
- goto up_sem;
- }
-
- op->count = count;
- init_completion(&op->com);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotaperead,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n",
- (int)hvrc);
- ret = -EIO;
- goto free_dma;
- }
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- ret = tape_rc_to_errno(op->rc, "read", devi.devno);
- else {
- ret = op->count;
- if (ret && copy_to_user(buf, op->buffer, ret)) {
- printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
- ret = -EFAULT;
- }
- }
-
-free_dma:
- dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
-up_sem:
- up(&reqSem);
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-/* ioctl */
-static int viotap_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- HvLpEvent_Rc hvrc;
- int ret;
- struct viot_devinfo_struct devi;
- struct mtop mtc;
- u32 myOp;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- down(&reqSem);
-
- ret = -EINVAL;
-
- switch (cmd) {
- case MTIOCTOP:
- ret = -EFAULT;
- /*
- * inode is null if and only if we (the kernel)
- * made the request
- */
- if (inode == NULL)
- memcpy(&mtc, (void *) arg, sizeof(struct mtop));
- else if (copy_from_user((char *)&mtc, (char *)arg,
- sizeof(struct mtop)))
- goto free_op;
-
- ret = -EIO;
- switch (mtc.mt_op) {
- case MTRESET:
- myOp = VIOTAPOP_RESET;
- break;
- case MTFSF:
- myOp = VIOTAPOP_FSF;
- break;
- case MTBSF:
- myOp = VIOTAPOP_BSF;
- break;
- case MTFSR:
- myOp = VIOTAPOP_FSR;
- break;
- case MTBSR:
- myOp = VIOTAPOP_BSR;
- break;
- case MTWEOF:
- myOp = VIOTAPOP_WEOF;
- break;
- case MTREW:
- myOp = VIOTAPOP_REW;
- break;
- case MTNOP:
- myOp = VIOTAPOP_NOP;
- break;
- case MTEOM:
- myOp = VIOTAPOP_EOM;
- break;
- case MTERASE:
- myOp = VIOTAPOP_ERASE;
- break;
- case MTSETBLK:
- myOp = VIOTAPOP_SETBLK;
- break;
- case MTSETDENSITY:
- myOp = VIOTAPOP_SETDENSITY;
- break;
- case MTTELL:
- myOp = VIOTAPOP_GETPOS;
- break;
- case MTSEEK:
- myOp = VIOTAPOP_SETPOS;
- break;
- case MTSETPART:
- myOp = VIOTAPOP_SETPART;
- break;
- case MTOFFL:
- myOp = VIOTAPOP_UNLOAD;
- break;
- default:
- printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
- "with invalid op 0x%x\n", mtc.mt_op);
- goto free_op;
- }
-
- /*
- * if we moved the head, we are no longer
- * reading or writing
- */
- switch (mtc.mt_op) {
- case MTFSF:
- case MTBSF:
- case MTFSR:
- case MTBSR:
- case MTTELL:
- case MTSEEK:
- case MTREW:
- chg_state(devi.devno, VIOT_IDLE, file);
- }
-
- init_completion(&op->com);
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeop,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op,
- VIOVERSION << 16,
- ((u64)devi.devno << 48), 0,
- (((u64)myOp) << 32) | mtc.mt_count, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- goto free_op;
- }
- wait_for_completion(&op->com);
- ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
- goto free_op;
-
- case MTIOCGET:
- ret = -EIO;
- init_completion(&op->com);
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapegetstatus,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- goto free_op;
- }
- wait_for_completion(&op->com);
-
- /* Operation is complete - grab the error code */
- ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
- free_op_struct(op);
- up(&reqSem);
-
- if ((ret == 0) && copy_to_user((void *)arg,
- &viomtget[devi.devno],
- sizeof(viomtget[0])))
- ret = -EFAULT;
- return ret;
- case MTIOCPOS:
- printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
- break;
- default:
- printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
- cmd);
- break;
- }
-
-free_op:
- free_op_struct(op);
- up(&reqSem);
- return ret;
-}
-
-static long viotap_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long rc;
-
- mutex_lock(&proc_viotape_mutex);
- rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- mutex_unlock(&proc_viotape_mutex);
- return rc;
-}
-
-static int viotap_open(struct inode *inode, struct file *file)
-{
- HvLpEvent_Rc hvrc;
- struct viot_devinfo_struct devi;
- int ret;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- mutex_lock(&proc_viotape_mutex);
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /* Note: We currently only support one mode! */
- if ((devi.devno >= viotape_numdev) || (devi.mode)) {
- ret = -ENODEV;
- goto free_op;
- }
-
- init_completion(&op->com);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != 0) {
- printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
- (int) hvrc);
- ret = -EIO;
- goto free_op;
- }
-
- wait_for_completion(&op->com);
- ret = tape_rc_to_errno(op->rc, "open", devi.devno);
-
-free_op:
- free_op_struct(op);
- mutex_unlock(&proc_viotape_mutex);
- return ret;
-}
-
-
-static int viotap_release(struct inode *inode, struct file *file)
-{
- HvLpEvent_Rc hvrc;
- struct viot_devinfo_struct devi;
- int ret = 0;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
- init_completion(&op->com);
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- if (devi.devno >= viotape_numdev) {
- ret = -ENODEV;
- goto free_op;
- }
-
- chg_state(devi.devno, VIOT_IDLE, file);
-
- if (devi.rewind) {
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeop,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0,
- ((u64)VIOTAPOP_REW) << 32, 0);
- wait_for_completion(&op->com);
-
- tape_rc_to_errno(op->rc, "rewind", devi.devno);
- }
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeclose,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != 0) {
- printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
- (int) hvrc);
- ret = -EIO;
- goto free_op;
- }
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- printk(VIOTAPE_KERN_WARN "close failed\n");
-
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-const struct file_operations viotap_fops = {
- .owner = THIS_MODULE,
- .read = viotap_read,
- .write = viotap_write,
- .unlocked_ioctl = viotap_unlocked_ioctl,
- .open = viotap_open,
- .release = viotap_release,
- .llseek = noop_llseek,
-};
-
-/* Handle interrupt events for tape */
-static void vioHandleTapeEvent(struct HvLpEvent *event)
-{
- int tapeminor;
- struct op_struct *op;
- struct viotapelpevent *tevent = (struct viotapelpevent *)event;
-
- if (event == NULL) {
- /* Notification that a partition went away! */
- if (!viopath_isactive(viopath_hostLp)) {
- /* TODO! Clean up */
- }
- return;
- }
-
- tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
- op = (struct op_struct *)event->xCorrelationToken;
- switch (tapeminor) {
- case viotapeopen:
- case viotapeclose:
- op->rc = tevent->sub_type_result;
- complete(&op->com);
- break;
- case viotaperead:
- op->rc = tevent->sub_type_result;
- op->count = tevent->len;
- complete(&op->com);
- break;
- case viotapewrite:
- if (op->non_blocking) {
- dma_free_coherent(op->dev, op->count,
- op->buffer, op->dmaaddr);
- free_op_struct(op);
- up(&reqSem);
- } else {
- op->rc = tevent->sub_type_result;
- op->count = tevent->len;
- complete(&op->com);
- }
- break;
- case viotapeop:
- case viotapegetpos:
- case viotapesetpos:
- case viotapegetstatus:
- if (op) {
- op->count = tevent->u.op.count;
- op->rc = tevent->sub_type_result;
- if (!op->non_blocking)
- complete(&op->com);
- }
- break;
- default:
- printk(VIOTAPE_KERN_WARN "weird ack\n");
- }
-}
-
-static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- int i = vdev->unit_address;
- int j;
- struct device_node *node = vdev->dev.of_node;
-
- if (i >= VIOTAPE_MAX_TAPE)
- return -ENODEV;
- if (!node)
- return -ENODEV;
-
- if (i >= viotape_numdev)
- viotape_numdev = i + 1;
-
- tape_device[i] = &vdev->dev;
- viotape_unitinfo[i].rsrcname = of_get_property(node,
- "linux,vio_rsrcname", NULL);
- viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
- NULL);
- viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
- NULL);
-
- state[i].cur_part = 0;
- for (j = 0; j < MAX_PARTITIONS; ++j)
- state[i].part_stat_rwi[j] = VIOT_IDLE;
- device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
- "iseries!vt%d", i);
- device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
- "iseries!nvt%d", i);
- printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
- "resource %10.10s type %4.4s, model %3.3s\n",
- i, viotape_unitinfo[i].rsrcname,
- viotape_unitinfo[i].type, viotape_unitinfo[i].model);
- return 0;
-}
-
-static int viotape_remove(struct vio_dev *vdev)
-{
- int i = vdev->unit_address;
-
- device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
- device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
- return 0;
-}
-
-/**
- * viotape_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viotape_device_table[] __devinitdata = {
- { "byte", "IBM,iSeries-viotape" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viotape_device_table);
-
-static struct vio_driver viotape_driver = {
- .id_table = viotape_device_table,
- .probe = viotape_probe,
- .remove = viotape_remove,
- .driver = {
- .name = "viotape",
- .owner = THIS_MODULE,
- }
-};
-
-
-int __init viotap_init(void)
-{
- int ret;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- op_struct_list = NULL;
- if ((ret = add_op_structs(VIOTAPE_MAXREQ)) < 0) {
- printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
- return ret;
- }
- spin_lock_init(&op_struct_list_lock);
-
- sema_init(&reqSem, VIOTAPE_MAXREQ);
-
- if (viopath_hostLp == HvLpIndexInvalid) {
- vio_set_hostlp();
- if (viopath_hostLp == HvLpIndexInvalid) {
- ret = -ENODEV;
- goto clear_op;
- }
- }
-
- ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
- VIOTAPE_MAXREQ + 2);
- if (ret) {
- printk(VIOTAPE_KERN_WARN
- "error on viopath_open to hostlp %d\n", ret);
- ret = -EIO;
- goto clear_op;
- }
-
- printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
- ", hosting partition %d\n", viopath_hostLp);
-
- vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
-
- ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
- if (ret < 0) {
- printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
- goto clear_handler;
- }
-
- tape_class = class_create(THIS_MODULE, "tape");
- if (IS_ERR(tape_class)) {
- printk(VIOTAPE_KERN_WARN "Unable to allocate class\n");
- ret = PTR_ERR(tape_class);
- goto unreg_chrdev;
- }
-
- ret = vio_register_driver(&viotape_driver);
- if (ret)
- goto unreg_class;
-
- proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
- &proc_viotape_operations);
-
- return 0;
-
-unreg_class:
- class_destroy(tape_class);
-unreg_chrdev:
- unregister_chrdev(VIOTAPE_MAJOR, "viotape");
-clear_handler:
- vio_clearHandler(viomajorsubtype_tape);
- viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
-clear_op:
- clear_op_struct_pool();
- return ret;
-}
-
-/* Give a new state to the tape object */
-static int chg_state(int index, unsigned char new_state, struct file *file)
-{
- unsigned char *cur_state =
- &state[index].part_stat_rwi[state[index].cur_part];
- int rc = 0;
-
- /* if the same state, don't bother */
- if (*cur_state == new_state)
- return 0;
-
- /* write an EOF if changing from writing to some other state */
- if (*cur_state == VIOT_WRITING) {
- struct mtop write_eof = { MTWEOF, 1 };
-
- rc = viotap_ioctl(NULL, file, MTIOCTOP,
- (unsigned long)&write_eof);
- }
- *cur_state = new_state;
- return rc;
-}
-
-/* Cleanup */
-static void __exit viotap_exit(void)
-{
- remove_proc_entry("iSeries/viotape", NULL);
- vio_unregister_driver(&viotape_driver);
- class_destroy(tape_class);
- unregister_chrdev(VIOTAPE_MAJOR, "viotape");
- viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
- vio_clearHandler(viomajorsubtype_tape);
- clear_op_struct_pool();
-}
-
-MODULE_LICENSE("GPL");
-module_init(viotap_init);
-module_exit(viotap_exit);
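
Every entry point removed above (viotap_ioctl, viotap_open, viotap_release) follows the same request/response shape: allocate an op_struct carrying an embedded completion, signal an LP event to the hosting partition with the op pointer as the correlation token, sleep in wait_for_completion(), and let vioHandleTapeEvent() store the result and call complete(). The sketch below is a stripped-down illustration of that pattern only; the demo_* names are hypothetical and are not part of the removed driver's API.

/*
 * Minimal sketch (not the removed driver's API) of the submit/complete
 * pattern used throughout viotape.c: fire an asynchronous event whose
 * correlation token is the request pointer, sleep on a completion, and
 * let the event handler fill in the result and wake the submitter.
 */
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_request {
	struct completion com;
	int rc;				/* filled in by the handler */
};

/* Response side: called when the acknowledgement event arrives. */
static void demo_event_handler(struct demo_request *req, int result)
{
	req->rc = result;
	complete(&req->com);		/* wake the waiting submitter */
}

/* Transport stub: the real driver signalled an LP event here; this
 * stub just pretends the response came back immediately. */
static int demo_fire_event(struct demo_request *req)
{
	demo_event_handler(req, 0);
	return 0;
}

/* Submit side: the shape of the viotap_open()/viotap_ioctl() requests. */
static int demo_do_request(void)
{
	struct demo_request *req;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&req->com);

	ret = demo_fire_event(req);
	if (ret) {
		kfree(req);
		return -EIO;
	}

	wait_for_completion(&req->com);	/* handler calls complete() */
	ret = req->rc;
	kfree(req);
	return ret;
}

The real driver additionally throttled outstanding requests with a counting semaphore (reqSem) and recycled op_structs from a preallocated pool rather than allocating one per request.
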
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b58b56187065..ddf86b6500b7 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1038,12 +1038,6 @@ static struct attribute_group port_attribute_group = {
.attrs = port_sysfs_entries,
 };
 
-static int debugfs_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
@@ -1087,7 +1081,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
static const struct file_operations port_debugfs_ops = {
.owner = THIS_MODULE,
- .open = debugfs_open,
+ .open = simple_open,
.read = debugfs_read,
};
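
The virtio_console hunk above swaps the driver's hand-rolled debugfs_open() for the generic simple_open() helper from fs/libfs.c, so behaviour is unchanged: simple_open() just publishes the inode's private pointer to the open file, roughly as sketched below.

#include <linux/fs.h>

/* Roughly what simple_open() does (see fs/libfs.c): copy the pointer
 * stashed at debugfs file creation time into the struct file, exactly
 * as the removed debugfs_open() did by hand. */
int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

With the helper available, drivers can drop identical one-off open callbacks; this hunk is one instance of that conversion.
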
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index e90e1c74fd4c..31ba11ca75e1 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -89,7 +89,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#ifdef CONFIG_OF
/* For open firmware. */