Diffstat (limited to 'arch')
-rw-r--r--  arch/cris/arch-v10/lib/usercopy.c | 14
-rw-r--r--  arch/cris/arch-v32/drivers/Kconfig | 8
-rw-r--r--  arch/cris/arch-v32/drivers/Makefile | 1
-rw-r--r--  arch/cris/arch-v32/drivers/i2c.h | 1
-rw-r--r--  arch/cris/arch-v32/drivers/sync_serial.c | 1430
-rw-r--r--  arch/cris/arch-v32/kernel/debugport.c | 82
-rw-r--r--  arch/cris/arch-v32/kernel/time.c | 29
-rw-r--r--  arch/cris/arch-v32/lib/usercopy.c | 15
-rw-r--r--  arch/cris/arch-v32/mach-fs/pinmux.c | 152
-rw-r--r--  arch/cris/include/arch-v32/mach-fs/mach/pinmux.h | 2
-rw-r--r--  arch/cris/include/asm/Kbuild | 4
-rw-r--r--  arch/cris/include/uapi/asm/Kbuild | 4
-rw-r--r--  arch/cris/kernel/crisksyms.c | 9
-rw-r--r--  arch/cris/kernel/traps.c | 61
-rw-r--r--  arch/cris/mm/init.c | 38
-rw-r--r--  arch/cris/mm/ioremap.c | 3
-rw-r--r--  arch/hexagon/include/asm/cache.h | 4
-rw-r--r--  arch/hexagon/include/asm/cacheflush.h | 36
-rw-r--r--  arch/hexagon/include/asm/io.h | 5
-rw-r--r--  arch/hexagon/kernel/setup.c | 1
-rw-r--r--  arch/hexagon/kernel/traps.c | 4
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/hexagon/mm/cache.c | 10
-rw-r--r--  arch/hexagon/mm/ioremap.c | 1
-rw-r--r--  arch/ia64/include/asm/percpu.h | 4
-rw-r--r--  arch/powerpc/include/asm/cpuidle.h | 20
-rw-r--r--  arch/powerpc/include/asm/opal.h | 42
-rw-r--r--  arch/powerpc/include/asm/paca.h | 10
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 2
-rw-r--r--  arch/powerpc/include/asm/processor.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/include/asm/syscall.h | 6
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 6
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 11
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 35
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 344
-rw-r--r--  arch/powerpc/kernel/smp.c | 9
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 23
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 39
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 50
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 166
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 29
-rw-r--r--  arch/powerpc/platforms/powernv/subcore.c | 34
-rw-r--r--  arch/powerpc/platforms/powernv/subcore.h | 9
-rw-r--r--  arch/x86/Kconfig | 36
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 84
-rw-r--r--  arch/x86/include/asm/io_apic.h | 35
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 6
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/ldt.h | 7
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 99
-rw-r--r--  arch/x86/kernel/apic/Makefile | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 22
-rw-r--r--  arch/x86/kernel/apic/htirq.c | 107
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 1356
-rw-r--r--  arch/x86/kernel/apic/msi.c | 286
-rw-r--r--  arch/x86/kernel/apic/vector.c | 719
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 22
-rw-r--r--  arch/x86/kernel/crash.c | 1
-rw-r--r--  arch/x86/kernel/entry_32.S | 4
-rw-r--r--  arch/x86/kernel/entry_64.S | 4
-rw-r--r--  arch/x86/kernel/irqinit.c | 35
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 1
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 1
-rw-r--r--  arch/x86/kernel/reboot.c | 1
-rw-r--r--  arch/x86/kernel/smpboot.c | 8
-rw-r--r--  arch/x86/kernel/tls.c | 6
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 2
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 10
-rw-r--r--  arch/x86/pci/irq.c | 25
-rw-r--r--  arch/x86/platform/uv/uv_irq.c | 6
74 files changed, 3471 insertions(+), 2189 deletions(-)
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index b0a608da7bd1..b964c667aced 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -30,8 +30,7 @@
/* Copy to userspace. This is based on the memcpy used for
kernel-to-kernel copying; see "string.c". */
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -187,13 +186,14 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -369,11 +369,10 @@ copy_exception_bytes:
return retn + n;
}
+EXPORT_SYMBOL(__copy_user_zeroing);
/* Zero userspace. */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -521,3 +520,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__do_clear_user);
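
These EXPORT_SYMBOL() lines (paired with the removals from arch/cris/kernel/crisksyms.c visible in the diffstat) follow the kernel convention of exporting a symbol right where it is defined rather than in a central ksyms file. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/export.h>
#include <linux/string.h>

/* Hypothetical helper, exported at its definition site. */
unsigned long copy_buf(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return n;
}
EXPORT_SYMBOL(copy_buf);	/* modules can now link against copy_buf */

Keeping the export next to the definition means the symbol and its export can never drift apart when the function is renamed or removed.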
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 15a9ed1d579c..4fc16b44fff2 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -108,6 +108,7 @@ config ETRAX_AXISFLASHMAP
select MTD_JEDECPROBE
select MTD_BLOCK
select MTD_COMPLEX_MAPPINGS
+ select MTD_MTDRAM
help
This option enables MTD mapping of flash devices. Needed to use
flash memories. If unsure, say Y.
@@ -358,13 +359,6 @@ config ETRAX_SPI_MMC
default MMC
select SPI
select MMC_SPI
- select ETRAX_SPI_MMC_BOARD
-
-# For the parts that can't be a module (due to restrictions in
-# framework elsewhere).
-config ETRAX_SPI_MMC_BOARD
- boolean
- default n
# While the board info is MMC_SPI only, the drivers are written to be
# independent of MMC_SPI, so we'll keep SPI non-dependent on the
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index 39aa3c117a86..15fbfefced2c 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
obj-$(CONFIG_ETRAX_I2C) += i2c.o
obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
obj-$(CONFIG_PCI) += pci/
-obj-$(CONFIG_ETRAX_SPI_MMC_BOARD) += board_mmcspi.o
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
index c073cf4ba016..d9cc856f89fb 100644
--- a/arch/cris/arch-v32/drivers/i2c.h
+++ b/arch/cris/arch-v32/drivers/i2c.h
@@ -2,7 +2,6 @@
#include <linux/init.h>
/* High level I2C actions */
-int __init i2c_init(void);
int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 5a149134cfb5..08a313fc2241 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1,8 +1,7 @@
/*
- * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
- *
- * Copyright (c) 2005 Axis Communications AB
+ * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
*
+ * Copyright (c) 2005, 2008 Axis Communications AB
* Author: Mikael Starvik
*
*/
@@ -16,16 +15,17 @@
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/wait.h>
#include <asm/io.h>
-#include <dma.h>
+#include <mach/dma.h>
#include <pinmux.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
+#include <hwregs/timer_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
@@ -59,22 +59,23 @@
/* the rest of the data pointed out by Descr1 and set readp to the start */
/* of Descr2 */
-#define SYNC_SERIAL_MAJOR 125
-
/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */
-#define IN_BUFFER_SIZE 12288
-#define IN_DESCR_SIZE 256
-#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
+#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
+#define NBR_IN_DESCR (8*6)
+#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
-#define OUT_BUFFER_SIZE 1024*8
#define NBR_OUT_DESCR 8
+#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7
+/* To be removed when we move to pure udev. */
+#define SYNC_SERIAL_MAJOR 125
+
/* NOTE: Enabling some debug will likely cause overrun or underrun,
- * especially if manual mode is use.
+ * especially if manual mode is used.
*/
#define DEBUG(x)
#define DEBUGREAD(x)
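
The reworked macros derive IN_BUFFER_SIZE from the DMA descriptor geometry instead of hard-coding 12288, and NBR_IN_DESCR is spelled 8*6 to keep the multiple-of-6 requirement for 24-bit (3-byte) words visible. A hedged sketch of a compile-time check one could add; the value of SSP_INPUT_CHUNK_SIZE is an assumption here, as its real definition lives in the driver's UAPI header:

#include <linux/bug.h>

#define SSP_INPUT_CHUNK_SIZE	256	/* assumed value, for illustration */
#define IN_DESCR_SIZE		SSP_INPUT_CHUNK_SIZE
#define NBR_IN_DESCR		(8 * 6)
#define IN_BUFFER_SIZE		(IN_DESCR_SIZE * NBR_IN_DESCR)

static inline void syncser_size_checks(void)
{
	/* 24-bit samples occupy 3 bytes; a multiple of 6 guarantees the
	 * buffer always holds a whole number of such words. */
	BUILD_BUG_ON(IN_BUFFER_SIZE % 6 != 0);
}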
@@ -85,11 +86,28 @@
#define DEBUGTRDMA(x)
#define DEBUGOUTBUF(x)
-typedef struct sync_port
-{
- reg_scope_instances regi_sser;
- reg_scope_instances regi_dmain;
- reg_scope_instances regi_dmaout;
+enum syncser_irq_setup {
+ no_irq_setup = 0,
+ dma_irq_setup = 1,
+ manual_irq_setup = 2,
+};
+
+struct sync_port {
+ unsigned long regi_sser;
+ unsigned long regi_dmain;
+ unsigned long regi_dmaout;
+
+ /* Interrupt vectors. */
+ unsigned long dma_in_intr_vect; /* Used for DMA in. */
+ unsigned long dma_out_intr_vect; /* Used for DMA out. */
+ unsigned long syncser_intr_vect; /* Used when no DMA. */
+
+ /* DMA number for in and out. */
+ unsigned int dma_in_nbr;
+ unsigned int dma_out_nbr;
+
+ /* DMA owner. */
+ enum dma_owner req_dma;
char started; /* 1 if port has been started */
char port_nbr; /* Port 0 or 1 */
@@ -99,22 +117,29 @@ typedef struct sync_port
char use_dma; /* 1 if port uses dma */
char tr_running;
- char init_irqs;
+ enum syncser_irq_setup init_irqs;
int output;
int input;
/* Next byte to be read by application */
- volatile unsigned char *volatile readp;
+ unsigned char *readp;
/* Next byte to be written by etrax */
- volatile unsigned char *volatile writep;
+ unsigned char *writep;
unsigned int in_buffer_size;
+ unsigned int in_buffer_len;
unsigned int inbufchunk;
- unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
- unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
- unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
- struct dma_descr_data* next_rx_desc;
- struct dma_descr_data* prev_rx_desc;
+ /* Data buffers for in and output. */
+ unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
+ unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
+ unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
+ struct timespec timestamp[NBR_IN_DESCR];
+ struct dma_descr_data *next_rx_desc;
+ struct dma_descr_data *prev_rx_desc;
+
+ struct timeval last_timestamp;
+ int read_ts_idx;
+ int write_ts_idx;
/* Pointer to the first available descriptor in the ring,
* unless active_tr_descr == catch_tr_descr and a dma
@@ -135,114 +160,138 @@ typedef struct sync_port
/* Number of bytes currently locked for being read by DMA */
int out_buf_count;
- dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
- dma_descr_context in_context __attribute__ ((__aligned__(32)));
- dma_descr_data out_descr[NBR_OUT_DESCR]
- __attribute__ ((__aligned__(16)));
- dma_descr_context out_context __attribute__ ((__aligned__(32)));
+ dma_descr_context in_context __aligned(32);
+ dma_descr_context out_context __aligned(32);
+ dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
+ dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
+
wait_queue_head_t out_wait_q;
wait_queue_head_t in_wait_q;
spinlock_t lock;
-} sync_port;
+};
static DEFINE_MUTEX(sync_serial_mutex);
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);
-static int sync_serial_open(struct inode *, struct file*);
-static int sync_serial_release(struct inode*, struct file*);
+static int sync_serial_open(struct inode *, struct file *);
+static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
-static int sync_serial_ioctl(struct file *,
- unsigned int cmd, unsigned long arg);
-static ssize_t sync_serial_write(struct file * file, const char * buf,
+static long sync_serial_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int sync_serial_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
-static ssize_t sync_serial_read(struct file *file, char *buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
- defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
- (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
- defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
+#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
+ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
+ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
+ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
#define SYNC_SER_DMA
+#else
+#define SYNC_SER_MANUAL
#endif
-static void send_word(sync_port* port);
-static void start_dma_out(struct sync_port *port, const char *data, int count);
-static void start_dma_in(sync_port* port);
#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count);
+static void start_dma_in(struct sync_port *port);
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif
-
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
- !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
- (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
- !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
-#define SYNC_SER_MANUAL
-#endif
#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port);
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif
-#ifdef CONFIG_ETRAXFS /* ETRAX FS */
-#define OUT_DMA_NBR 4
-#define IN_DMA_NBR 5
-#define PINMUX_SSER pinmux_sser0
-#define SYNCSER_INST regi_sser0
-#define SYNCSER_INTR_VECT SSER0_INTR_VECT
-#define OUT_DMA_INST regi_dma4
-#define IN_DMA_INST regi_dma5
-#define DMA_OUT_INTR_VECT DMA4_INTR_VECT
-#define DMA_IN_INTR_VECT DMA5_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser0
-#else /* Artpec-3 */
-#define OUT_DMA_NBR 6
-#define IN_DMA_NBR 7
-#define PINMUX_SSER pinmux_sser
-#define SYNCSER_INST regi_sser
-#define SYNCSER_INTR_VECT SSER_INTR_VECT
-#define OUT_DMA_INST regi_dma6
-#define IN_DMA_INST regi_dma7
-#define DMA_OUT_INTR_VECT DMA6_INTR_VECT
-#define DMA_IN_INTR_VECT DMA7_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser
+#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
+#define artpec_request_dma crisv32_request_dma
+#define artpec_free_dma crisv32_free_dma
+
+#ifdef CONFIG_ETRAXFS
+/* ETRAX FS */
+#define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR
+#define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR
+#define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR
+#define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR
+#define PINMUX_SSER0 pinmux_sser0
+#define PINMUX_SSER1 pinmux_sser1
+#define SYNCSER_INST0 regi_sser0
+#define SYNCSER_INST1 regi_sser1
+#define SYNCSER_INTR_VECT0 SSER0_INTR_VECT
+#define SYNCSER_INTR_VECT1 SSER1_INTR_VECT
+#define OUT_DMA_INST0 regi_dma4
+#define IN_DMA_INST0 regi_dma5
+#define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT
+#define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT
+#define DMA_IN_INTR_VECT0 DMA5_INTR_VECT
+#define DMA_IN_INTR_VECT1 DMA6_INTR_VECT
+#define REQ_DMA_SYNCSER0 dma_sser0
+#define REQ_DMA_SYNCSER1 dma_sser1
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
+#define PORT1_DMA 1
+#else
+#define PORT1_DMA 0
+#endif
+#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
+/* ARTPEC-3 */
+#define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR
+#define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR
+#define PINMUX_SSER0 pinmux_sser
+#define SYNCSER_INST0 regi_sser
+#define SYNCSER_INTR_VECT0 SSER_INTR_VECT
+#define OUT_DMA_INST0 regi_dma6
+#define IN_DMA_INST0 regi_dma7
+#define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT
+#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT
+#define REQ_DMA_SYNCSER0 dma_sser
+#define REQ_DMA_SYNCSER1 dma_sser
#endif
-/* The ports */
-static struct sync_port ports[]=
-{
- {
- .regi_sser = SYNCSER_INST,
- .regi_dmaout = OUT_DMA_INST,
- .regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
- .use_dma = 1,
+#define PORT0_DMA 1
#else
- .use_dma = 0,
+#define PORT0_DMA 0
#endif
- }
-#ifdef CONFIG_ETRAXFS
- ,
+/* The ports */
+static struct sync_port ports[] = {
{
- .regi_sser = regi_sser1,
- .regi_dmaout = regi_dma6,
- .regi_dmain = regi_dma7,
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
- .use_dma = 1,
-#else
- .use_dma = 0,
-#endif
- }
+ .regi_sser = SYNCSER_INST0,
+ .regi_dmaout = OUT_DMA_INST0,
+ .regi_dmain = IN_DMA_INST0,
+ .use_dma = PORT0_DMA,
+ .dma_in_intr_vect = DMA_IN_INTR_VECT0,
+ .dma_out_intr_vect = DMA_OUT_INTR_VECT0,
+ .dma_in_nbr = DMA_IN_NBR0,
+ .dma_out_nbr = DMA_OUT_NBR0,
+ .req_dma = REQ_DMA_SYNCSER0,
+ .syncser_intr_vect = SYNCSER_INTR_VECT0,
+ },
+#ifdef CONFIG_ETRAXFS
+ {
+ .regi_sser = SYNCSER_INST1,
+ .regi_dmaout = regi_dma6,
+ .regi_dmain = regi_dma7,
+ .use_dma = PORT1_DMA,
+ .dma_in_intr_vect = DMA_IN_INTR_VECT1,
+ .dma_out_intr_vect = DMA_OUT_INTR_VECT1,
+ .dma_in_nbr = DMA_IN_NBR1,
+ .dma_out_nbr = DMA_OUT_NBR1,
+ .req_dma = REQ_DMA_SYNCSER1,
+ .syncser_intr_vect = SYNCSER_INTR_VECT1,
+ },
#endif
};
#define NBR_PORTS ARRAY_SIZE(ports)
-static const struct file_operations sync_serial_fops = {
+static const struct file_operations syncser_fops = {
.owner = THIS_MODULE,
.write = sync_serial_write,
.read = sync_serial_read,
@@ -253,61 +302,40 @@ static const struct file_operations sync_serial_fops = {
.llseek = noop_llseek,
};
-static int __init etrax_sync_serial_init(void)
-{
- ports[0].enabled = 0;
-#ifdef CONFIG_ETRAXFS
- ports[1].enabled = 0;
-#endif
- if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
- &sync_serial_fops) < 0) {
- printk(KERN_WARNING
- "Unable to get major for synchronous serial port\n");
- return -EBUSY;
- }
-
- /* Initialize Ports */
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
- if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
- printk(KERN_WARNING
- "Unable to alloc pins for synchronous serial port 0\n");
- return -EIO;
- }
- ports[0].enabled = 1;
- initialize_port(0);
-#endif
-
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
- if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
- printk(KERN_WARNING
- "Unable to alloc pins for synchronous serial port 0\n");
- return -EIO;
- }
- ports[1].enabled = 1;
- initialize_port(1);
-#endif
+static dev_t syncser_first;
+static int minor_count = NBR_PORTS;
+#define SYNCSER_NAME "syncser"
+static struct cdev *syncser_cdev;
+static struct class *syncser_class;
-#ifdef CONFIG_ETRAXFS
- printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
-#else
- printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
-#endif
- return 0;
+static void sync_serial_start_port(struct sync_port *port)
+{
+ reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
+ reg_sser_rw_tr_cfg tr_cfg =
+ REG_RD(sser, port->regi_sser, rw_tr_cfg);
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ cfg.en = regk_sser_yes;
+ tr_cfg.tr_en = regk_sser_yes;
+ rec_cfg.rec_en = regk_sser_yes;
+ REG_WR(sser, port->regi_sser, rw_cfg, cfg);
+ REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+ REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+ port->started = 1;
}
static void __init initialize_port(int portnbr)
{
- int __attribute__((unused)) i;
struct sync_port *port = &ports[portnbr];
- reg_sser_rw_cfg cfg = {0};
- reg_sser_rw_frm_cfg frm_cfg = {0};
- reg_sser_rw_tr_cfg tr_cfg = {0};
- reg_sser_rw_rec_cfg rec_cfg = {0};
+ reg_sser_rw_cfg cfg = { 0 };
+ reg_sser_rw_frm_cfg frm_cfg = { 0 };
+ reg_sser_rw_tr_cfg tr_cfg = { 0 };
+ reg_sser_rw_rec_cfg rec_cfg = { 0 };
- DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
+ DEBUG(pr_info("Init sync serial port %d\n", portnbr));
port->port_nbr = portnbr;
- port->init_irqs = 1;
+ port->init_irqs = no_irq_setup;
port->out_rd_ptr = port->out_buffer;
port->out_buf_count = 0;
@@ -318,10 +346,11 @@ static void __init initialize_port(int portnbr)
port->readp = port->flip;
port->writep = port->flip;
port->in_buffer_size = IN_BUFFER_SIZE;
+ port->in_buffer_len = 0;
port->inbufchunk = IN_DESCR_SIZE;
- port->next_rx_desc = &port->in_descr[0];
- port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
- port->prev_rx_desc->eol = 1;
+
+ port->read_ts_idx = 0;
+ port->write_ts_idx = 0;
init_waitqueue_head(&port->out_wait_q);
init_waitqueue_head(&port->in_wait_q);
@@ -368,14 +397,18 @@ static void __init initialize_port(int portnbr)
REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
#ifdef SYNC_SER_DMA
- /* Setup the descriptor ring for dma out/transmit. */
- for (i = 0; i < NBR_OUT_DESCR; i++) {
- port->out_descr[i].wait = 0;
- port->out_descr[i].intr = 1;
- port->out_descr[i].eol = 0;
- port->out_descr[i].out_eop = 0;
- port->out_descr[i].next =
- (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+ {
+ int i;
+ /* Setup the descriptor ring for dma out/transmit. */
+ for (i = 0; i < NBR_OUT_DESCR; i++) {
+ dma_descr_data *descr = &port->out_descr[i];
+ descr->wait = 0;
+ descr->intr = 1;
+ descr->eol = 0;
+ descr->out_eop = 0;
+ descr->next =
+ (dma_descr_data *)virt_to_phys(&port->out_descr[i + 1]);
+ }
}
/* Create a ring from the list. */
@@ -391,201 +424,116 @@ static void __init initialize_port(int portnbr)
static inline int sync_data_avail(struct sync_port *port)
{
- int avail;
- unsigned char *start;
- unsigned char *end;
-
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- /* 0123456789 0123456789
- * ----- - -----
- * ^rp ^wp ^wp ^rp
- */
-
- if (end >= start)
- avail = end - start;
- else
- avail = port->in_buffer_size - (start - end);
- return avail;
-}
-
-static inline int sync_data_avail_to_end(struct sync_port *port)
-{
- int avail;
- unsigned char *start;
- unsigned char *end;
-
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- /* 0123456789 0123456789
- * ----- -----
- * ^rp ^wp ^wp ^rp
- */
-
- if (end >= start)
- avail = end - start;
- else
- avail = port->flip + port->in_buffer_size - start;
- return avail;
+ return port->in_buffer_len;
}
static int sync_serial_open(struct inode *inode, struct file *file)
{
+ int ret = 0;
int dev = iminor(inode);
- int ret = -EBUSY;
- sync_port *port;
- reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
- reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
+ struct sync_port *port;
+#ifdef SYNC_SER_DMA
+ reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
+ reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
+#endif
- mutex_lock(&sync_serial_mutex);
- DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
+ DEBUG(pr_debug("Open sync serial port %d\n", dev));
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
- ret = -ENODEV;
- goto out;
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
+ return -ENODEV;
}
port = &ports[dev];
/* Allow opening this device twice (assuming one reader and one writer) */
- if (port->busy == 2)
- {
- DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
- goto out;
+ if (port->busy == 2) {
+ DEBUG(pr_info("syncser%d is busy\n", dev));
+ return -EBUSY;
}
+ mutex_lock(&sync_serial_mutex);
- if (port->init_irqs) {
- if (port->use_dma) {
- if (port == &ports[0]) {
-#ifdef SYNC_SER_DMA
- if (request_irq(DMA_OUT_INTR_VECT,
- tr_interrupt,
- 0,
- "synchronous serial 0 dma tr",
- &ports[0])) {
- printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
- goto out;
- } else if (request_irq(DMA_IN_INTR_VECT,
- rx_interrupt,
- 0,
- "synchronous serial 1 dma rx",
- &ports[0])) {
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
- goto out;
- } else if (crisv32_request_dma(OUT_DMA_NBR,
- "synchronous serial 0 dma tr",
- DMA_VERBOSE_ON_ERROR,
- 0,
- REQ_DMA_SYNCSER)) {
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- free_irq(DMA_IN_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
- goto out;
- } else if (crisv32_request_dma(IN_DMA_NBR,
- "synchronous serial 0 dma rec",
- DMA_VERBOSE_ON_ERROR,
- 0,
- REQ_DMA_SYNCSER)) {
- crisv32_free_dma(OUT_DMA_NBR);
- free_irq(DMA_OUT_INTR_VECT, &port[0]);
- free_irq(DMA_IN_INTR_VECT, &port[0]);
- printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
- goto out;
- }
-#endif
- }
-#ifdef CONFIG_ETRAXFS
- else if (port == &ports[1]) {
+ /* Clear any stale data left in the flip buffer */
+ port->readp = port->writep = port->flip;
+ port->in_buffer_len = 0;
+ port->read_ts_idx = 0;
+ port->write_ts_idx = 0;
+
+ if (port->init_irqs != no_irq_setup) {
+ /* Init only on first call. */
+ port->busy++;
+ mutex_unlock(&sync_serial_mutex);
+ return 0;
+ }
+ if (port->use_dma) {
#ifdef SYNC_SER_DMA
- if (request_irq(DMA6_INTR_VECT,
- tr_interrupt,
- 0,
- "synchronous serial 1 dma tr",
- &ports[1])) {
- printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
- goto out;
- } else if (request_irq(DMA7_INTR_VECT,
- rx_interrupt,
- 0,
- "synchronous serial 1 dma rx",
- &ports[1])) {
- free_irq(DMA6_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
- goto out;
- } else if (crisv32_request_dma(
- SYNC_SER1_TX_DMA_NBR,
- "synchronous serial 1 dma tr",
- DMA_VERBOSE_ON_ERROR,
- 0,
- dma_sser1)) {
- free_irq(DMA6_INTR_VECT, &ports[1]);
- free_irq(DMA7_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
- goto out;
- } else if (crisv32_request_dma(
- SYNC_SER1_RX_DMA_NBR,
- "synchronous serial 3 dma rec",
- DMA_VERBOSE_ON_ERROR,
- 0,
- dma_sser1)) {
- crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
- free_irq(DMA6_INTR_VECT, &ports[1]);
- free_irq(DMA7_INTR_VECT, &ports[1]);
- printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
- goto out;
- }
-#endif
- }
+ const char *tmp;
+ DEBUG(pr_info("Using DMA for syncser%d\n", dev));
+
+ tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
+ if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
+ tmp, port)) {
+ pr_err("Can't alloc syncser%d TX IRQ", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ if (artpec_request_dma(port->dma_out_nbr, tmp,
+ DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+ free_irq(port->dma_out_intr_vect, port);
+ pr_err("Can't alloc syncser%d TX DMA", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
+ if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
+ tmp, port)) {
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ pr_err("Can't alloc syncser%d RX IRQ", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ if (artpec_request_dma(port->dma_in_nbr, tmp,
+ DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ free_irq(port->dma_in_intr_vect, port);
+ pr_err("Can't alloc syncser%d RX DMA", dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ /* Enable DMAs */
+ REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
+ REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
+ /* Enable DMA IRQs */
+ REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
+ REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
+ /* Set up wordsize = 1 for DMAs. */
+ DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
+ DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
+
+ start_dma_in(port);
+ port->init_irqs = dma_irq_setup;
#endif
- /* Enable DMAs */
- REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
- REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
- /* Enable DMA IRQs */
- REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
- REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
- /* Set up wordsize = 1 for DMAs. */
- DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
- DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
-
- start_dma_in(port);
- port->init_irqs = 0;
- } else { /* !port->use_dma */
+ } else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
- if (port == &ports[0]) {
- if (request_irq(SYNCSER_INTR_VECT,
- manual_interrupt,
- 0,
- "synchronous serial manual irq",
- &ports[0])) {
- printk("Can't allocate sync serial manual irq");
- goto out;
- }
- }
-#ifdef CONFIG_ETRAXFS
- else if (port == &ports[1]) {
- if (request_irq(SSER1_INTR_VECT,
- manual_interrupt,
- 0,
- "synchronous serial manual irq",
- &ports[1])) {
- printk(KERN_CRIT "Can't allocate sync serial manual irq");
- goto out;
- }
- }
-#endif
- port->init_irqs = 0;
+ const char *tmp = dev == 0 ? "syncser0 manual irq" :
+ "syncser1 manual irq";
+ if (request_irq(port->syncser_intr_vect, manual_interrupt,
+ 0, tmp, port)) {
+ pr_err("Can't alloc syncser%d manual irq",
+ dev);
+ ret = -EBUSY;
+ goto unlock_and_exit;
+ }
+ port->init_irqs = manual_irq_setup;
#else
- panic("sync_serial: Manual mode not supported.\n");
+ panic("sync_serial: Manual mode not supported\n");
#endif /* SYNC_SER_MANUAL */
- }
-
- } /* port->init_irqs */
-
+ }
port->busy++;
ret = 0;
-out:
+
+unlock_and_exit:
mutex_unlock(&sync_serial_mutex);
return ret;
}
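
The rewritten open() replaces the two near-identical, #ifdef-wrapped request blocks with one path driven by the per-port fields added to struct sync_port, using a goto-based unwind in which each failure point releases exactly what was acquired before it, in reverse order. A condensed sketch of that idiom, reusing the patch's own helpers but with a hypothetical function name:

static int acquire_port_resources(struct sync_port *port)
{
	int ret = 0;

	if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
			"syncser tx", port))
		return -EBUSY;

	if (artpec_request_dma(port->dma_out_nbr, "syncser tx",
			       DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
		ret = -EBUSY;
		goto free_tx_irq;
	}

	if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
			"syncser rx", port)) {
		ret = -EBUSY;
		goto free_tx_dma;
	}
	/* ... the RX DMA channel follows the same pattern ... */
	return 0;

free_tx_dma:
	artpec_free_dma(port->dma_out_nbr);
free_tx_irq:
	free_irq(port->dma_out_intr_vect, port);
	return ret;
}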
@@ -593,18 +541,17 @@ out:
static int sync_serial_release(struct inode *inode, struct file *file)
{
int dev = iminor(inode);
- sync_port *port;
+ struct sync_port *port;
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -ENODEV;
}
port = &ports[dev];
if (port->busy)
port->busy--;
if (!port->busy)
- /* XXX */ ;
+ /* XXX */;
return 0;
}
@@ -612,21 +559,15 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file));
unsigned int mask = 0;
- sync_port *port;
- DEBUGPOLL( static unsigned int prev_mask = 0; );
+ struct sync_port *port;
+ DEBUGPOLL(
+ static unsigned int prev_mask;
+ );
port = &ports[dev];
- if (!port->started) {
- reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_rec_cfg rec_cfg =
- REG_RD(sser, port->regi_sser, rw_rec_cfg);
- cfg.en = regk_sser_yes;
- rec_cfg.rec_en = port->input;
- REG_WR(sser, port->regi_sser, rw_cfg, cfg);
- REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
- port->started = 1;
- }
+ if (!port->started)
+ sync_serial_start_port(port);
poll_wait(file, &port->out_wait_q, wait);
poll_wait(file, &port->in_wait_q, wait);
@@ -645,33 +586,175 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
if (port->input && sync_data_avail(port) >= port->inbufchunk)
mask |= POLLIN | POLLRDNORM;
- DEBUGPOLL(if (mask != prev_mask)
- printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
- mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
- prev_mask = mask;
- );
+ DEBUGPOLL(
+ if (mask != prev_mask)
+ pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
+ mask,
+ mask & POLLOUT ? "POLLOUT" : "",
+ mask & POLLIN ? "POLLIN" : "");
+ prev_mask = mask;
+ );
return mask;
}
-static int sync_serial_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static ssize_t __sync_serial_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos,
+ struct timespec *ts)
+{
+ unsigned long flags;
+ int dev = iminor(file_inode(file));
+ int avail;
+ struct sync_port *port;
+ unsigned char *start;
+ unsigned char *end;
+
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
+ return -ENODEV;
+ }
+ port = &ports[dev];
+
+ if (!port->started)
+ sync_serial_start_port(port);
+
+ /* Calculate number of available bytes */
+ /* Save pointers so they are not modified by an interrupt */
+ spin_lock_irqsave(&port->lock, flags);
+ start = port->readp;
+ end = port->writep;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ while ((start == end) && !port->in_buffer_len) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(port->in_wait_q,
+ !(start == end && !port->full));
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ spin_lock_irqsave(&port->lock, flags);
+ start = port->readp;
+ end = port->writep;
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
+ dev, count,
+ start - port->flip, end - port->flip,
+ port->in_buffer_size));
+
+ /* Lazy read, never return wrapped data. */
+ if (end > start)
+ avail = end - start;
+ else
+ avail = port->flip + port->in_buffer_size - start;
+
+ count = count > avail ? avail : count;
+ if (copy_to_user(buf, start, count))
+ return -EFAULT;
+
+ /* If timestamp requested, find timestamp of first returned byte
+ * and copy it.
+ * N.B: Applications that request timestamps MUST read data in
+ * chunks that are multiples of IN_DESCR_SIZE.
+ * Otherwise the timestamps will not be aligned to the data read.
+ */
+ if (ts != NULL) {
+ int idx = port->read_ts_idx;
+ memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
+ port->read_ts_idx += count / IN_DESCR_SIZE;
+ if (port->read_ts_idx >= NBR_IN_DESCR)
+ port->read_ts_idx = 0;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->readp += count;
+ /* Check for wrap */
+ if (port->readp >= port->flip + port->in_buffer_size)
+ port->readp = port->flip;
+ port->in_buffer_len -= count;
+ port->full = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ DEBUGREAD(pr_info("r %d\n", count));
+
+ return count;
+}
+
+static ssize_t sync_serial_input(struct file *file, unsigned long arg)
+{
+ struct ssp_request req;
+ int count;
+ int ret;
+
+ /* Copy the request structure from user-mode. */
+ ret = copy_from_user(&req, (struct ssp_request __user *)arg,
+ sizeof(struct ssp_request));
+
+ if (ret) {
+ DEBUG(pr_info("sync_serial_input copy from user failed\n"));
+ return -EFAULT;
+ }
+
+ /* To get the timestamps aligned, make sure that 'len'
+ * is a multiple of IN_DESCR_SIZE.
+ */
+ if ((req.len % IN_DESCR_SIZE) != 0) {
+ DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
+ req.len, IN_DESCR_SIZE));
+ return -EFAULT;
+ }
+
+ /* Do the actual read. */
+ /* Note that req.buf is actually a pointer to user space. */
+ count = __sync_serial_read(file, req.buf, req.len,
+ NULL, &req.ts);
+
+ if (count < 0) {
+ DEBUG(pr_info("sync_serial_input read failed\n"));
+ return count;
+ }
+
+ /* Copy the request back to user-mode. */
+ ret = copy_to_user((struct ssp_request __user *)arg, &req,
+ sizeof(struct ssp_request));
+
+ if (ret) {
+ DEBUG(pr_info("syncser input copy2user failed\n"));
+ return -EFAULT;
+ }
+
+ /* Return the number of bytes read. */
+ return count;
+}
+
+
+static int sync_serial_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
int return_val = 0;
int dma_w_size = regk_dma_set_w_size1;
int dev = iminor(file_inode(file));
- sync_port *port;
+ struct sync_port *port;
reg_sser_rw_tr_cfg tr_cfg;
reg_sser_rw_rec_cfg rec_cfg;
reg_sser_rw_frm_cfg frm_cfg;
reg_sser_rw_cfg gen_cfg;
reg_sser_rw_intr_mask intr_mask;
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
+ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -1;
}
- port = &ports[dev];
+
+ if (cmd == SSP_INPUT)
+ return sync_serial_input(file, arg);
+
+ port = &ports[dev];
spin_lock_irq(&port->lock);
tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
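
The new SSP_INPUT ioctl wraps __sync_serial_read() so an application receives the data together with the timestamp of its first chunk; as the checks above show, the requested length must be a multiple of IN_DESCR_SIZE or the ioctl is rejected. A hedged user-space sketch follows; the device node name, header location and buffer size are assumptions, while the field names match the struct ssp_request usage above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/sync_serial.h>	/* assumed home of SSP_INPUT/ssp_request */

int main(void)
{
	unsigned char buf[1024];	/* assumed multiple of IN_DESCR_SIZE */
	struct ssp_request req = { .buf = buf, .len = sizeof(buf) };
	int fd = open("/dev/syncser0", O_RDONLY);	/* assumed node name */
	int n;

	if (fd < 0)
		return 1;
	n = ioctl(fd, SSP_INPUT, &req);	/* returns the number of bytes read */
	if (n > 0)
		printf("%d bytes, first chunk at %ld.%09ld\n",
		       n, (long)req.ts.tv_sec, req.ts.tv_nsec);
	close(fd);
	return 0;
}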
@@ -680,11 +763,9 @@ static int sync_serial_ioctl(struct file *file,
gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
- switch(cmd)
- {
+ switch (cmd) {
case SSP_SPEED:
- if (GET_SPEED(arg) == CODEC)
- {
+ if (GET_SPEED(arg) == CODEC) {
unsigned int freq;
gen_cfg.base_freq = regk_sser_f32;
@@ -701,15 +782,25 @@ static int sync_serial_ioctl(struct file *file,
case FREQ_256kHz:
gen_cfg.clk_div = 125 *
(1 << (freq - FREQ_256kHz)) - 1;
- break;
+ break;
case FREQ_512kHz:
gen_cfg.clk_div = 62;
- break;
+ break;
case FREQ_1MHz:
case FREQ_2MHz:
case FREQ_4MHz:
gen_cfg.clk_div = 8 * (1 << freq) - 1;
- break;
+ break;
+ }
+ } else if (GET_SPEED(arg) == CODEC_f32768) {
+ gen_cfg.base_freq = regk_sser_f32_768;
+ switch (GET_FREQ(arg)) {
+ case FREQ_4096kHz:
+ gen_cfg.clk_div = 7;
+ break;
+ default:
+ spin_unlock_irq(&port->lock);
+ return -EINVAL;
}
} else {
gen_cfg.base_freq = regk_sser_f29_493;
@@ -767,62 +858,64 @@ static int sync_serial_ioctl(struct file *file,
break;
case SSP_MODE:
- switch(arg)
- {
- case MASTER_OUTPUT:
- port->output = 1;
- port->input = 0;
- frm_cfg.out_on = regk_sser_tr;
- frm_cfg.frame_pin_dir = regk_sser_out;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_OUTPUT:
- port->output = 1;
- port->input = 0;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- case MASTER_INPUT:
- port->output = 0;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_out;
- frm_cfg.out_on = regk_sser_intern_tb;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_INPUT:
- port->output = 0;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- case MASTER_BIDIR:
- port->output = 1;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_out;
- frm_cfg.out_on = regk_sser_intern_tb;
- gen_cfg.clk_dir = regk_sser_out;
- break;
- case SLAVE_BIDIR:
- port->output = 1;
- port->input = 1;
- frm_cfg.frame_pin_dir = regk_sser_in;
- gen_cfg.clk_dir = regk_sser_in;
- break;
- default:
- spin_unlock_irq(&port->lock);
- return -EINVAL;
+ switch (arg) {
+ case MASTER_OUTPUT:
+ port->output = 1;
+ port->input = 0;
+ frm_cfg.out_on = regk_sser_tr;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_OUTPUT:
+ port->output = 1;
+ port->input = 0;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ case MASTER_INPUT:
+ port->output = 0;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ frm_cfg.out_on = regk_sser_intern_tb;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_INPUT:
+ port->output = 0;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ case MASTER_BIDIR:
+ port->output = 1;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_out;
+ frm_cfg.out_on = regk_sser_intern_tb;
+ gen_cfg.clk_dir = regk_sser_out;
+ break;
+ case SLAVE_BIDIR:
+ port->output = 1;
+ port->input = 1;
+ frm_cfg.frame_pin_dir = regk_sser_in;
+ gen_cfg.clk_dir = regk_sser_in;
+ break;
+ default:
+ spin_unlock_irq(&port->lock);
+ return -EINVAL;
}
- if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
+ if (!port->use_dma || arg == MASTER_OUTPUT ||
+ arg == SLAVE_OUTPUT)
intr_mask.rdav = regk_sser_yes;
break;
case SSP_FRAME_SYNC:
if (arg & NORMAL_SYNC) {
frm_cfg.rec_delay = 1;
frm_cfg.tr_delay = 1;
- }
- else if (arg & EARLY_SYNC)
+ } else if (arg & EARLY_SYNC)
frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
- else if (arg & SECOND_WORD_SYNC) {
+ else if (arg & LATE_SYNC) {
+ frm_cfg.tr_delay = 2;
+ frm_cfg.rec_delay = 2;
+ } else if (arg & SECOND_WORD_SYNC) {
frm_cfg.rec_delay = 7;
frm_cfg.tr_delay = 1;
}
@@ -914,15 +1007,12 @@ static int sync_serial_ioctl(struct file *file,
frm_cfg.type = regk_sser_level;
frm_cfg.tr_delay = 1;
frm_cfg.level = regk_sser_neg_lo;
- if (arg & SPI_SLAVE)
- {
+ if (arg & SPI_SLAVE) {
rec_cfg.clk_pol = regk_sser_neg;
gen_cfg.clk_dir = regk_sser_in;
port->input = 1;
port->output = 0;
- }
- else
- {
+ } else {
gen_cfg.out_clk_pol = regk_sser_pos;
port->input = 0;
port->output = 1;
@@ -965,19 +1055,19 @@ static int sync_serial_ioctl(struct file *file,
}
static long sync_serial_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
- long ret;
+ long ret;
- mutex_lock(&sync_serial_mutex);
- ret = sync_serial_ioctl_unlocked(file, cmd, arg);
- mutex_unlock(&sync_serial_mutex);
+ mutex_lock(&sync_serial_mutex);
+ ret = sync_serial_ioctl_unlocked(file, cmd, arg);
+ mutex_unlock(&sync_serial_mutex);
- return ret;
+ return ret;
}
/* NOTE: sync_serial_write does not support concurrency */
-static ssize_t sync_serial_write(struct file *file, const char *buf,
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int dev = iminor(file_inode(file));
@@ -993,7 +1083,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
unsigned char *buf_stop_ptr; /* Last byte + 1 */
if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
- DEBUG(printk("Invalid minor %d\n", dev));
+ DEBUG(pr_info("Invalid minor %d\n", dev));
return -ENODEV;
}
port = &ports[dev];
@@ -1006,9 +1096,9 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
* |_________|___________________|________________________|
* ^ rd_ptr ^ wr_ptr
*/
- DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
- port->port_nbr, count, port->active_tr_descr,
- port->catch_tr_descr));
+ DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
+ port->port_nbr, count, port->active_tr_descr,
+ port->catch_tr_descr));
/* Read variables that may be updated by interrupts */
spin_lock_irqsave(&port->lock, flags);
@@ -1020,7 +1110,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (port->tr_running &&
((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
out_buf_count >= OUT_BUFFER_SIZE)) {
- DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
+ DEBUGWRITE(pr_info("sser%d full\n", dev));
return -EAGAIN;
}
@@ -1043,15 +1133,16 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (copy_from_user(wr_ptr, buf, trunc_count))
return -EFAULT;
- DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
- out_buf_count, trunc_count,
- port->out_buf_count, port->out_buffer,
- wr_ptr, buf_stop_ptr));
+ DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
+ out_buf_count, trunc_count,
+ port->out_buf_count, port->out_buffer,
+ wr_ptr, buf_stop_ptr));
/* Make sure transmitter/receiver is running */
if (!port->started) {
reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
cfg.en = regk_sser_yes;
rec_cfg.rec_en = port->input;
REG_WR(sser, port->regi_sser, rw_cfg, cfg);
@@ -1068,8 +1159,11 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
spin_lock_irqsave(&port->lock, flags);
port->out_buf_count += trunc_count;
if (port->use_dma) {
+#ifdef SYNC_SER_DMA
start_dma_out(port, wr_ptr, trunc_count);
+#endif
} else if (!port->tr_running) {
+#ifdef SYNC_SER_MANUAL
reg_sser_rw_intr_mask intr_mask;
intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
/* Start sender by writing data */
@@ -1077,14 +1171,15 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
/* and enable transmitter ready IRQ */
intr_mask.trdy = 1;
REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
+#endif
}
spin_unlock_irqrestore(&port->lock, flags);
/* Exit if non blocking */
if (file->f_flags & O_NONBLOCK) {
- DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
- port->port_nbr, trunc_count,
- REG_RD_INT(dma, port->regi_dmaout, r_intr)));
+ DEBUGWRITE(pr_info("w d%d c %u %08x\n",
+ port->port_nbr, trunc_count,
+ REG_RD_INT(dma, port->regi_dmaout, r_intr)));
return trunc_count;
}
@@ -1094,105 +1189,32 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
if (signal_pending(current))
return -EINTR;
- DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
- port->port_nbr, trunc_count));
+ DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
return trunc_count;
}
-static ssize_t sync_serial_read(struct file * file, char * buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int dev = iminor(file_inode(file));
- int avail;
- sync_port *port;
- unsigned char* start;
- unsigned char* end;
- unsigned long flags;
-
- if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
- {
- DEBUG(printk("Invalid minor %d\n", dev));
- return -ENODEV;
- }
- port = &ports[dev];
-
- DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
-
- if (!port->started)
- {
- reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
- reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
- cfg.en = regk_sser_yes;
- tr_cfg.tr_en = regk_sser_yes;
- rec_cfg.rec_en = regk_sser_yes;
- REG_WR(sser, port->regi_sser, rw_cfg, cfg);
- REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
- REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
- port->started = 1;
- }
-
- /* Calculate number of available bytes */
- /* Save pointers to avoid that they are modified by interrupt */
- spin_lock_irqsave(&port->lock, flags);
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- spin_unlock_irqrestore(&port->lock, flags);
- while ((start == end) && !port->full) /* No data */
- {
- DEBUGREAD(printk(KERN_DEBUG "&"));
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- wait_event_interruptible(port->in_wait_q,
- !(start == end && !port->full));
- if (signal_pending(current))
- return -EINTR;
-
- spin_lock_irqsave(&port->lock, flags);
- start = (unsigned char*)port->readp; /* cast away volatile */
- end = (unsigned char*)port->writep; /* cast away volatile */
- spin_unlock_irqrestore(&port->lock, flags);
- }
-
- /* Lazy read, never return wrapped data. */
- if (port->full)
- avail = port->in_buffer_size;
- else if (end > start)
- avail = end - start;
- else
- avail = port->flip + port->in_buffer_size - start;
-
- count = count > avail ? avail : count;
- if (copy_to_user(buf, start, count))
- return -EFAULT;
- /* Disable interrupts while updating readp */
- spin_lock_irqsave(&port->lock, flags);
- port->readp += count;
- if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
- port->readp = port->flip;
- port->full = 0;
- spin_unlock_irqrestore(&port->lock, flags);
- DEBUGREAD(printk("r %d\n", count));
- return count;
+ return __sync_serial_read(file, buf, count, ppos, NULL);
}
-static void send_word(sync_port* port)
+#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port)
{
reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
reg_sser_rw_tr_data tr_data = {0};
- switch(tr_cfg.sample_size)
+ switch (tr_cfg.sample_size) {
+ case 8:
+ port->out_buf_count--;
+ tr_data.data = *port->out_rd_ptr++;
+ REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
+ if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
+ port->out_rd_ptr = port->out_buffer;
+ break;
+ case 12:
{
- case 8:
- port->out_buf_count--;
- tr_data.data = *port->out_rd_ptr++;
- REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
- if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
- port->out_rd_ptr = port->out_buffer;
- break;
- case 12:
- {
int data = (*port->out_rd_ptr++) << 8;
data |= *port->out_rd_ptr++;
port->out_buf_count -= 2;
@@ -1200,8 +1222,8 @@ static void send_word(sync_port* port)
REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
port->out_rd_ptr = port->out_buffer;
+ break;
}
- break;
case 16:
port->out_buf_count -= 2;
tr_data.data = *(unsigned short *)port->out_rd_ptr;
@@ -1233,27 +1255,28 @@ static void send_word(sync_port* port)
break;
}
}
+#endif
-static void start_dma_out(struct sync_port *port,
- const char *data, int count)
+#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count)
{
- port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
+ port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
port->active_tr_descr->after = port->active_tr_descr->buf + count;
port->active_tr_descr->intr = 1;
port->active_tr_descr->eol = 1;
port->prev_tr_descr->eol = 0;
- DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
+ DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
port->prev_tr_descr, port->active_tr_descr));
port->prev_tr_descr = port->active_tr_descr;
- port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
+ port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
if (!port->tr_running) {
reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
rw_tr_cfg);
- port->out_context.next = 0;
+ port->out_context.next = NULL;
port->out_context.saved_data =
(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
port->out_context.saved_data_buf = port->prev_tr_descr->buf;
@@ -1263,57 +1286,58 @@ static void start_dma_out(struct sync_port *port,
tr_cfg.tr_en = regk_sser_yes;
REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
- DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
+ DEBUGTRDMA(pr_info("dma s\n"););
} else {
DMA_CONTINUE_DATA(port->regi_dmaout);
- DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
+ DEBUGTRDMA(pr_info("dma c\n"););
}
port->tr_running = 1;
}
-static void start_dma_in(sync_port *port)
+static void start_dma_in(struct sync_port *port)
{
int i;
char *buf;
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
port->writep = port->flip;
+ spin_unlock_irqrestore(&port->lock, flags);
- if (port->writep > port->flip + port->in_buffer_size) {
- panic("Offset too large in sync serial driver\n");
- return;
- }
- buf = (char*)virt_to_phys(port->in_buffer);
+ buf = (char *)virt_to_phys(port->in_buffer);
for (i = 0; i < NBR_IN_DESCR; i++) {
port->in_descr[i].buf = buf;
port->in_descr[i].after = buf + port->inbufchunk;
port->in_descr[i].intr = 1;
- port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
+ port->in_descr[i].next =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
port->in_descr[i].buf = buf;
buf += port->inbufchunk;
}
/* Link the last descriptor to the first */
- port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+ port->in_descr[i-1].next =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
port->in_descr[i-1].eol = regk_sser_yes;
port->next_rx_desc = &port->in_descr[0];
port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
- port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+ port->in_context.saved_data =
+ (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
port->in_context.saved_data_buf = port->in_descr[0].buf;
DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}
-#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
reg_dma_r_masked_intr masked;
- reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
+ reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
reg_dma_rw_stat stat;
int i;
int found = 0;
int stop_sser = 0;
for (i = 0; i < NBR_PORTS; i++) {
- sync_port *port = &ports[i];
- if (!port->enabled || !port->use_dma)
+ struct sync_port *port = &ports[i];
+ if (!port->enabled || !port->use_dma)
continue;
/* IRQ active for the port? */
@@ -1338,19 +1362,20 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
int sent;
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
- "in descr %p (ac: %p)\n",
- port->out_buf_count, sent,
- port->out_buf_count - sent,
- port->catch_tr_descr,
- port->active_tr_descr););
+ DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
+ "in descr %p (ac: %p)\n",
+ port->out_buf_count, sent,
+ port->out_buf_count - sent,
+ port->catch_tr_descr,
+ port->active_tr_descr););
port->out_buf_count -= sent;
port->catch_tr_descr =
phys_to_virt((int) port->catch_tr_descr->next);
port->out_rd_ptr =
phys_to_virt((int) port->catch_tr_descr->buf);
} else {
- int i, sent;
+ reg_sser_rw_tr_cfg tr_cfg;
+ int j, sent;
/* EOL handler.
* Note that if an EOL was encountered during the irq
* locked section of sync_ser_write the DMA will be
@@ -1358,11 +1383,11 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
* The remaining descriptors will be traversed by
* the descriptor interrupts as usual.
*/
- i = 0;
+ j = 0;
while (!port->catch_tr_descr->eol) {
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGOUTBUF(printk(KERN_DEBUG
+ DEBUGOUTBUF(pr_info(
"traversing descr %p -%d (%d)\n",
port->catch_tr_descr,
sent,
@@ -1370,16 +1395,15 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
port->out_buf_count -= sent;
port->catch_tr_descr = phys_to_virt(
(int)port->catch_tr_descr->next);
- i++;
- if (i >= NBR_OUT_DESCR) {
+ j++;
+ if (j >= NBR_OUT_DESCR) {
/* TODO: Reset and recover */
panic("sync_serial: missing eol");
}
}
sent = port->catch_tr_descr->after -
port->catch_tr_descr->buf;
- DEBUGOUTBUF(printk(KERN_DEBUG
- "eol at descr %p -%d (%d)\n",
+ DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
port->catch_tr_descr,
sent,
port->out_buf_count));
@@ -1394,15 +1418,13 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
OUT_BUFFER_SIZE)
port->out_rd_ptr = port->out_buffer;
- reg_sser_rw_tr_cfg tr_cfg =
- REG_RD(sser, port->regi_sser, rw_tr_cfg);
- DEBUGTXINT(printk(KERN_DEBUG
+ tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
+ DEBUGTXINT(pr_info(
"tr_int DMA stop %d, set catch @ %p\n",
port->out_buf_count,
port->active_tr_descr));
if (port->out_buf_count != 0)
- printk(KERN_CRIT "sync_ser: buffer not "
- "empty after eol.\n");
+ pr_err("sync_ser: buf not empty after eol\n");
port->catch_tr_descr = port->active_tr_descr;
port->tr_running = 0;
tr_cfg.tr_en = regk_sser_no;
@@ -1414,62 +1436,79 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(found);
} /* tr_interrupt */
+
+static inline void handle_rx_packet(struct sync_port *port)
+{
+ int idx;
+ reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+ unsigned long flags;
+
+ DEBUGRXINT(pr_info("!"));
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* If we overrun, the user experience is crap regardless of whether
+ * we drop new or old data. It's much easier to get it right when
+ * dropping new data, so let's do that.
+ */
+ if ((port->writep + port->inbufchunk <=
+ port->flip + port->in_buffer_size) &&
+ (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
+ memcpy(port->writep,
+ phys_to_virt((unsigned)port->next_rx_desc->buf),
+ port->inbufchunk);
+ port->writep += port->inbufchunk;
+ if (port->writep >= port->flip + port->in_buffer_size)
+ port->writep = port->flip;
+
+ /* Timestamp the new data chunk. */
+ if (port->write_ts_idx == NBR_IN_DESCR)
+ port->write_ts_idx = 0;
+ idx = port->write_ts_idx++;
+ do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+ port->in_buffer_len += port->inbufchunk;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ port->next_rx_desc->eol = 1;
+ port->prev_rx_desc->eol = 0;
+ /* Cache bug workaround */
+ flush_dma_descr(port->prev_rx_desc, 0);
+ port->prev_rx_desc = port->next_rx_desc;
+ port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
+ /* Cache bug workaround */
+ flush_dma_descr(port->prev_rx_desc, 1);
+ /* wake up the waiting process */
+ wake_up_interruptible(&port->in_wait_q);
+ DMA_CONTINUE(port->regi_dmain);
+ REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+
+}
+
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
reg_dma_r_masked_intr masked;
- reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
int i;
int found = 0;
- for (i = 0; i < NBR_PORTS; i++)
- {
- sync_port *port = &ports[i];
+ DEBUG(pr_info("rx_interrupt\n"));
+
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
- if (!port->enabled || !port->use_dma )
+ if (!port->enabled || !port->use_dma)
continue;
masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
- if (masked.data) /* Descriptor interrupt */
- {
- found = 1;
- while (REG_RD(dma, port->regi_dmain, rw_data) !=
- virt_to_phys(port->next_rx_desc)) {
- DEBUGRXINT(printk(KERN_DEBUG "!"));
- if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
- int first_size = port->flip + port->in_buffer_size - port->writep;
- memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
- memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
- port->writep = port->flip + port->inbufchunk - first_size;
- } else {
- memcpy((char*)port->writep,
- phys_to_virt((unsigned)port->next_rx_desc->buf),
- port->inbufchunk);
- port->writep += port->inbufchunk;
- if (port->writep >= port->flip + port->in_buffer_size)
- port->writep = port->flip;
- }
- if (port->writep == port->readp)
- {
- port->full = 1;
- }
-
- port->next_rx_desc->eol = 1;
- port->prev_rx_desc->eol = 0;
- /* Cache bug workaround */
- flush_dma_descr(port->prev_rx_desc, 0);
- port->prev_rx_desc = port->next_rx_desc;
- port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
- /* Cache bug workaround */
- flush_dma_descr(port->prev_rx_desc, 1);
- /* wake up the waiting process */
- wake_up_interruptible(&port->in_wait_q);
- DMA_CONTINUE(port->regi_dmain);
- REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+ if (!masked.data)
+ continue;
- }
- }
+ /* Descriptor interrupt */
+ found = 1;
+ while (REG_RD(dma, port->regi_dmain, rw_data) !=
+ virt_to_phys(port->next_rx_desc))
+ handle_rx_packet(port);
}
return IRQ_RETVAL(found);
} /* rx_interrupt */
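
The refactored receive path drops the newest chunk on overrun instead of splitting copies across the wrap as the old code did. A minimal stand-alone sketch of that drop-newest ring policy (all names are illustrative, not the driver's API):

    /* Sketch of the drop-newest policy in handle_rx_packet() above. */
    #include <string.h>

    struct ring {
        char *buf;      /* base of the ring */
        char *writep;   /* next write position */
        size_t size;    /* total ring size */
        size_t len;     /* bytes currently buffered */
    };

    /* Copy one fixed-size chunk in; on overrun, drop the new data.
     * Note a chunk is refused rather than split across the wrap, so
     * size should be a multiple of the chunk length. */
    int ring_put_chunk(struct ring *r, const char *chunk, size_t chunklen)
    {
        if (r->writep + chunklen > r->buf + r->size ||
            r->len + chunklen > r->size)
            return -1;  /* dropped, old data kept */

        memcpy(r->writep, chunk, chunklen);
        r->writep += chunklen;
        if (r->writep >= r->buf + r->size)
            r->writep = r->buf;  /* wrap */
        r->len += chunklen;
        return 0;
    }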
@@ -1478,75 +1517,83 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id)
#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
+ unsigned long flags;
int i;
int found = 0;
reg_sser_r_masked_intr masked;
- for (i = 0; i < NBR_PORTS; i++)
- {
- sync_port *port = &ports[i];
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
if (!port->enabled || port->use_dma)
- {
continue;
- }
masked = REG_RD(sser, port->regi_sser, r_masked_intr);
- if (masked.rdav) /* Data received? */
- {
- reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
- reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
+ /* Data received? */
+ if (masked.rdav) {
+ reg_sser_rw_rec_cfg rec_cfg =
+ REG_RD(sser, port->regi_sser, rw_rec_cfg);
+ reg_sser_r_rec_data data = REG_RD(sser,
+ port->regi_sser, r_rec_data);
found = 1;
/* Read data */
- switch(rec_cfg.sample_size)
- {
+ spin_lock_irqsave(&port->lock, flags);
+ switch (rec_cfg.sample_size) {
case 8:
*port->writep++ = data.data & 0xff;
break;
case 12:
*port->writep = (data.data & 0x0ff0) >> 4;
*(port->writep + 1) = data.data & 0x0f;
- port->writep+=2;
+ port->writep += 2;
break;
case 16:
- *(unsigned short*)port->writep = data.data;
- port->writep+=2;
+ *(unsigned short *)port->writep = data.data;
+ port->writep += 2;
break;
case 24:
- *(unsigned int*)port->writep = data.data;
- port->writep+=3;
+ *(unsigned int *)port->writep = data.data;
+ port->writep += 3;
break;
case 32:
- *(unsigned int*)port->writep = data.data;
- port->writep+=4;
+ *(unsigned int *)port->writep = data.data;
+ port->writep += 4;
break;
}
- if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
+ /* Wrap? */
+ if (port->writep >= port->flip + port->in_buffer_size)
port->writep = port->flip;
if (port->writep == port->readp) {
- /* receive buffer overrun, discard oldest data
- */
+ /* Receive buf overrun, discard oldest data */
port->readp++;
- if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
+ /* Wrap? */
+ if (port->readp >= port->flip +
+ port->in_buffer_size)
port->readp = port->flip;
}
+ spin_unlock_irqrestore(&port->lock, flags);
if (sync_data_avail(port) >= port->inbufchunk)
- wake_up_interruptible(&port->in_wait_q); /* Wake up application */
+ /* Wake up application */
+ wake_up_interruptible(&port->in_wait_q);
}
- if (masked.trdy) /* Transmitter ready? */
- {
+ /* Transmitter ready? */
+ if (masked.trdy) {
found = 1;
- if (port->out_buf_count > 0) /* More data to send */
+ /* More data to send */
+ if (port->out_buf_count > 0)
send_word(port);
- else /* transmission finished */
- {
+ else {
+ /* Transmission finished */
reg_sser_rw_intr_mask intr_mask;
- intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
+ intr_mask = REG_RD(sser, port->regi_sser,
+ rw_intr_mask);
intr_mask.trdy = 0;
- REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
- wake_up_interruptible(&port->out_wait_q); /* Wake up application */
+ REG_WR(sser, port->regi_sser,
+ rw_intr_mask, intr_mask);
+ /* Wake up application */
+ wake_up_interruptible(&port->out_wait_q);
}
}
}
@@ -1554,4 +1601,109 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id)
}
#endif
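
For reference, the 12-bit branch of the sample_size switch in manual_interrupt() above stores the upper eight bits of each sample in the first byte and the low nibble in the second. A stand-alone illustration (helper name hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the 12-bit case of the sample_size switch: upper eight
     * bits into dst[0], low nibble into dst[1]. */
    static void store_sample12(uint8_t *dst, uint16_t sample)
    {
        dst[0] = (sample & 0x0ff0) >> 4;
        dst[1] = sample & 0x0f;
    }

    int main(void)
    {
        uint8_t out[2];

        store_sample12(out, 0xabc);             /* 12-bit sample */
        printf("%02x %02x\n", out[0], out[1]);  /* prints "ab 0c" */
        return 0;
    }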
+static int __init etrax_sync_serial_init(void)
+{
+#if 1
+ /* This code will be removed when we move to udev for all devices. */
+ syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
+ if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
+ pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
+ return -1;
+ }
+#else
+ /* Allocate dynamic major number. */
+ if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
+ pr_err("Failed to allocate character device region\n");
+ return -1;
+ }
+#endif
+ syncser_cdev = cdev_alloc();
+ if (!syncser_cdev) {
+ pr_err("Failed to allocate cdev for syncser\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -1;
+ }
+ cdev_init(syncser_cdev, &syncser_fops);
+
+ /* Create a sysfs class for syncser */
+ syncser_class = class_create(THIS_MODULE, "syncser_class");
+
+ /* Initialize Ports */
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
+ if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
+ pr_warn("Unable to alloc pins for synchronous serial port 0\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -EIO;
+ }
+ initialize_port(0);
+ ports[0].enabled = 1;
+ /* Register with sysfs so udev can pick it up. */
+ device_create(syncser_class, NULL, syncser_first, NULL,
+ "%s%d", SYNCSER_NAME, 0);
+#endif
+
+#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
+ if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
+ pr_warn("Unable to alloc pins for synchronous serial port 1\n");
+ unregister_chrdev_region(syncser_first, minor_count);
+ class_destroy(syncser_class);
+ return -EIO;
+ }
+ initialize_port(1);
+ ports[1].enabled = 1;
+ /* Register with sysfs so udev can pick it up. */
+ device_create(syncser_class, NULL, syncser_first + 1, NULL,
+ "%s%d", SYNCSER_NAME, 1);
+#endif
+
+ /* Add it to system */
+ if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
+ pr_err("Failed to add syncser as char device\n");
+ device_destroy(syncser_class, syncser_first);
+ class_destroy(syncser_class);
+ cdev_del(syncser_cdev);
+ unregister_chrdev_region(syncser_first, minor_count);
+ return -1;
+ }
+
+ pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
+ SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
+
+ return 0;
+}
+
+static void __exit etrax_sync_serial_exit(void)
+{
+ int i;
+ device_destroy(syncser_class, syncser_first);
+ class_destroy(syncser_class);
+
+ if (syncser_cdev) {
+ cdev_del(syncser_cdev);
+ unregister_chrdev_region(syncser_first, minor_count);
+ }
+ for (i = 0; i < NBR_PORTS; i++) {
+ struct sync_port *port = &ports[i];
+ if (port->init_irqs == dma_irq_setup) {
+ /* Free dma irqs and dma channels. */
+#ifdef SYNC_SER_DMA
+ artpec_free_dma(port->dma_in_nbr);
+ artpec_free_dma(port->dma_out_nbr);
+ free_irq(port->dma_out_intr_vect, port);
+ free_irq(port->dma_in_intr_vect, port);
+#endif
+ } else if (port->init_irqs == manual_irq_setup) {
+ /* Free manual irq. */
+ free_irq(port->syncser_intr_vect, port);
+ }
+ }
+
+ pr_info("ARTPEC synchronous serial port unregistered\n");
+}
+
module_init(etrax_sync_serial_init);
+module_exit(etrax_sync_serial_exit);
+
+MODULE_LICENSE("GPL");
+
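
The init path above follows the usual chrdev bring-up order (region, cdev, class, device nodes) with staged unwinding on failure. A condensed sketch of that pattern under the same fixed-major assumption (all example_* names are placeholders):

    #include <linux/cdev.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    #define EXAMPLE_MAJOR   60  /* placeholder major number */
    #define EXAMPLE_MINORS  2

    static dev_t example_first;
    static struct cdev *example_cdev;
    static struct class *example_class;
    static const struct file_operations example_fops = { .owner = THIS_MODULE };

    static int __init example_init(void)
    {
        int ret;

        example_first = MKDEV(EXAMPLE_MAJOR, 0);
        ret = register_chrdev_region(example_first, EXAMPLE_MINORS, "example");
        if (ret)
            return ret;

        example_cdev = cdev_alloc();
        if (!example_cdev) {
            ret = -ENOMEM;
            goto out_region;
        }
        example_cdev->ops = &example_fops;

        example_class = class_create(THIS_MODULE, "example_class");
        if (IS_ERR(example_class)) {
            ret = PTR_ERR(example_class);
            goto out_cdev;
        }

        /* Make the cdev live before exposing nodes to udev. */
        ret = cdev_add(example_cdev, example_first, EXAMPLE_MINORS);
        if (ret)
            goto out_class;
        device_create(example_class, NULL, example_first, NULL, "example%d", 0);
        return 0;

    out_class:
        class_destroy(example_class);
    out_cdev:
        cdev_del(example_cdev);
    out_region:
        unregister_chrdev_region(example_first, EXAMPLE_MINORS);
        return ret;
    }

    static void __exit example_exit(void)
    {
        device_destroy(example_class, example_first);
        class_destroy(example_class);
        cdev_del(example_cdev);
        unregister_chrdev_region(example_first, EXAMPLE_MINORS);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");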
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
index 610909b003f6..02e33ebe51ec 100644
--- a/arch/cris/arch-v32/kernel/debugport.c
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -3,7 +3,9 @@
*/
#include <linux/console.h>
+#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/string.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/reg_map.h>
#include <hwregs/ser_defs.h>
@@ -65,6 +67,7 @@ struct dbg_port ports[] =
},
#endif
};
+
static struct dbg_port *port =
#if defined(CONFIG_ETRAX_DEBUG_PORT0)
&ports[0];
@@ -97,14 +100,19 @@ static struct dbg_port *kgdb_port =
#endif
#endif
-static void
-start_port(struct dbg_port* p)
+static void start_port(struct dbg_port *p)
{
- if (!p)
- return;
+ /* Set up serial port registers */
+ reg_ser_rw_tr_ctrl tr_ctrl = {0};
+ reg_ser_rw_tr_dma_en tr_dma_en = {0};
- if (p->started)
+ reg_ser_rw_rec_ctrl rec_ctrl = {0};
+ reg_ser_rw_tr_baud_div tr_baud_div = {0};
+ reg_ser_rw_rec_baud_div rec_baud_div = {0};
+
+ if (!p || p->started)
return;
+
p->started = 1;
if (p->nbr == 1)
@@ -118,36 +126,24 @@ start_port(struct dbg_port* p)
crisv32_pinmux_alloc_fixed(pinmux_ser4);
#endif
- /* Set up serial port registers */
- reg_ser_rw_tr_ctrl tr_ctrl = {0};
- reg_ser_rw_tr_dma_en tr_dma_en = {0};
-
- reg_ser_rw_rec_ctrl rec_ctrl = {0};
- reg_ser_rw_tr_baud_div tr_baud_div = {0};
- reg_ser_rw_rec_baud_div rec_baud_div = {0};
-
tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
tr_ctrl.en = rec_ctrl.en = 1;
- if (p->parity == 'O')
- {
+ if (p->parity == 'O') {
tr_ctrl.par_en = regk_ser_yes;
tr_ctrl.par = regk_ser_odd;
rec_ctrl.par_en = regk_ser_yes;
rec_ctrl.par = regk_ser_odd;
- }
- else if (p->parity == 'E')
- {
+ } else if (p->parity == 'E') {
tr_ctrl.par_en = regk_ser_yes;
tr_ctrl.par = regk_ser_even;
rec_ctrl.par_en = regk_ser_yes;
rec_ctrl.par = regk_ser_even;
}
- if (p->bits == 7)
- {
+ if (p->bits == 7) {
tr_ctrl.data_bits = regk_ser_bits7;
rec_ctrl.data_bits = regk_ser_bits7;
}
@@ -161,8 +157,7 @@ start_port(struct dbg_port* p)
#ifdef CONFIG_ETRAX_KGDB
/* Use polling to get a single character from the kernel debug port */
-int
-getDebugChar(void)
+int getDebugChar(void)
{
reg_ser_rs_stat_din stat;
reg_ser_rw_ack_intr ack_intr = { 0 };
@@ -179,8 +174,7 @@ getDebugChar(void)
}
/* Use polling to put a single character to the kernel debug port */
-void
-putDebugChar(int val)
+void putDebugChar(int val)
{
reg_ser_r_stat_din stat;
do {
@@ -190,12 +184,48 @@ putDebugChar(int val)
}
#endif /* CONFIG_ETRAX_KGDB */
+static void __init early_putch(int c)
+{
+ reg_ser_r_stat_din stat;
+ /* Wait until transmitter is ready and send. */
+ do
+ stat = REG_RD(ser, port->instance, r_stat_din);
+ while (!stat.tr_rdy);
+ REG_WR_INT(ser, port->instance, rw_dout, c);
+}
+
+static void __init
+early_console_write(struct console *con, const char *s, unsigned n)
+{
+ extern void reset_watchdog(void);
+ int i;
+
+ /* Send data. */
+ for (i = 0; i < n; i++) {
+ /* TODO: the '\n' -> '\r\n' translation should be done at the
+ * receiver. Remove it when the serial driver does. */
+ if (s[i] == '\n')
+ early_putch('\r');
+ early_putch(s[i]);
+ reset_watchdog();
+ }
+}
+
+static struct console early_console_dev __initdata = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
/* Register console for printk's, etc. */
-int __init
-init_etrax_debug(void)
+int __init init_etrax_debug(void)
{
start_port(port);
+ /* Register an early console if a debug port was chosen. */
+ register_console(&early_console_dev);
+
#ifdef CONFIG_ETRAX_KGDB
start_port(kgdb_port);
#endif /* CONFIG_ETRAX_KGDB */
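
Registering a boot console, as init_etrax_debug() now does, needs little more than a write hook plus the CON_BOOT flag, which makes the console core unregister it automatically once a real console driver appears. A minimal kernel-context sketch (the polled putchar is a stub for the hardware access):

    #include <linux/console.h>
    #include <linux/init.h>

    static void example_putch(int c)
    {
        /* Hardware-specific: busy-wait for TX ready, then write 'c'.
         * Stubbed out in this sketch. */
        (void)c;
    }

    static void __init example_console_write(struct console *con,
                                             const char *s, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            if (s[i] == '\n')
                example_putch('\r');   /* LF -> CRLF */
            example_putch(s[i]);
        }
    }

    static struct console example_early_console __initdata = {
        .name  = "early",
        .write = example_console_write,
        .flags = CON_PRINTBUFFER | CON_BOOT, /* replay log; auto-unregister */
        .index = -1,
    };

    static int __init example_console_init(void)
    {
        register_console(&example_early_console);
        return 0;
    }
    console_initcall(example_console_init);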
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index ee66866538f8..eb74dabbeb96 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
+#include <linux/mm.h>
#include <asm/types.h>
#include <asm/signal.h>
#include <asm/io.h>
@@ -56,7 +57,6 @@ static int __init etrax_init_cont_rotime(void)
}
arch_initcall(etrax_init_cont_rotime);
-
unsigned long timer_regs[NR_CPUS] =
{
regi_timer0,
@@ -68,9 +68,8 @@ unsigned long timer_regs[NR_CPUS] =
extern int set_rtc_mmss(unsigned long nowtime);
#ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
- void *data);
+static int cris_time_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
static struct notifier_block cris_time_freq_notifier_block = {
.notifier_call = cris_time_freq_notifier,
@@ -87,7 +86,6 @@ unsigned long get_ns_in_jiffie(void)
return ns;
}
-
/* From timer MDS describing the hardware watchdog:
* 4.3.1 Watchdog Operation
* The watchdog timer is an 8-bit timer with a configurable start value.
@@ -109,11 +107,18 @@ static short int watchdog_key = 42; /* arbitrary 7 bit number */
* is used though, so set this really low. */
#define WATCHDOG_MIN_FREE_PAGES 8
+/* for reliable NICE_DOGGY behaviour */
+static int bite_in_progress;
+
void reset_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
+ if (unlikely(bite_in_progress))
+ return;
+#endif
/* Only keep watchdog happy as long as we have memory left! */
if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
/* Reset the watchdog with the inverse of the old key */
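
Each keep-alive writes the 7-bit bitwise inverse of the previous key, so a wild loop that keeps storing one fixed value cannot pet the watchdog forever. A tiny stand-alone model of the key sequence (the register write itself is elided):

    #include <stdio.h>

    /* Illustrative keep-alive: each re-arm presents the 7-bit inverse
     * of the previously written key, mirroring the driver's scheme. */
    static unsigned int watchdog_key = 42;  /* arbitrary 7-bit start value */

    static unsigned int next_watchdog_key(void)
    {
        watchdog_key = ~watchdog_key & 0x7f;  /* invert within 7 bits */
        return watchdog_key;  /* would go into the wd ctrl register */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 3; i++)
            printf("%u\n", next_watchdog_key());  /* 85, 42, 85 */
        return 0;
    }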
@@ -148,7 +153,9 @@ void handle_watchdog_bite(struct pt_regs *regs)
#if defined(CONFIG_ETRAX_WATCHDOG)
extern int cause_of_death;
+ nmi_enter();
oops_in_progress = 1;
+ bite_in_progress = 1;
printk(KERN_WARNING "Watchdog bite\n");
/* Check if forced restart or unexpected watchdog */
@@ -170,6 +177,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
printk(KERN_WARNING "Oops: bitten by watchdog\n");
show_registers(regs);
oops_in_progress = 0;
+ printk("\n"); /* Flush mtdoops. */
#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
reset_watchdog();
#endif
@@ -202,7 +210,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
/* Reset watchdog otherwise it resets us! */
reset_watchdog();
- /* Update statistics. */
+ /* Update statistics. */
update_process_times(user_mode(regs));
cris_do_profile(regs); /* Save profiling information */
@@ -213,7 +221,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
/* Call the real timer interrupt handler */
xtime_update(1);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
@@ -293,14 +301,13 @@ void __init time_init(void)
#ifdef CONFIG_CPU_FREQ
cpufreq_register_notifier(&cris_time_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
}
#ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int cris_time_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
{
struct cpufreq_freqs *freqs = data;
if (val == CPUFREQ_POSTCHANGE) {
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 0b5b70d5f58a..f0f335d8aa79 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -26,8 +26,7 @@
/* Copy to userspace. This is based on the memcpy used for
kernel-to-kernel copying; see "string.c". */
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -155,13 +154,13 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
-
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -321,11 +320,10 @@ copy_exception_bytes:
return retn + n;
}
+EXPORT_SYMBOL(__copy_user_zeroing);
/* Zero userspace. */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
/* We want the parameters put in special registers.
Make sure the compiler is able to make something useful of this.
@@ -468,3 +466,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
return retn;
}
+EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 38f29eec14a6..05a04708b8eb 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -26,7 +26,29 @@ static DEFINE_SPINLOCK(pinmux_lock);
static void crisv32_pinmux_set(int port);
-int crisv32_pinmux_init(void)
+static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+ enum pin_mode mode)
+{
+ int i;
+
+ for (i = first_pin; i <= last_pin; i++) {
+ if ((pins[port][i] != pinmux_none)
+ && (pins[port][i] != pinmux_gpio)
+ && (pins[port][i] != mode)) {
+#ifdef DEBUG
+ panic("Pinmux alloc failed!\n");
+#endif
+ return -EPERM;
+ }
+ }
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port][i] = mode;
+
+ crisv32_pinmux_set(port);
+
+ return 0;
+}
+
+static int crisv32_pinmux_init(void)
{
static int initialized;
@@ -37,20 +59,20 @@ int crisv32_pinmux_init(void)
pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
REG_WR(pinmux, regi_pinmux, rw_pa, pa);
- crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
- crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
+ __crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
}
return 0;
}
-int
-crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
+int crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+ enum pin_mode mode)
{
- int i;
unsigned long flags;
+ int ret;
crisv32_pinmux_init();
@@ -59,26 +81,11 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
spin_lock_irqsave(&pinmux_lock, flags);
- for (i = first_pin; i <= last_pin; i++) {
- if ((pins[port][i] != pinmux_none)
- && (pins[port][i] != pinmux_gpio)
- && (pins[port][i] != mode)) {
- spin_unlock_irqrestore(&pinmux_lock, flags);
-#ifdef DEBUG
- panic("Pinmux alloc failed!\n");
-#endif
- return -EPERM;
- }
- }
-
- for (i = first_pin; i <= last_pin; i++)
- pins[port][i] = mode;
-
- crisv32_pinmux_set(port);
+ ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode);
spin_unlock_irqrestore(&pinmux_lock, flags);
- return 0;
+ return ret;
}
int crisv32_pinmux_alloc_fixed(enum fixed_function function)
@@ -98,58 +105,58 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
switch (function) {
case pinmux_ser1:
- ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
hwprot.ser1 = regk_pinmux_yes;
break;
case pinmux_ser2:
- ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
hwprot.ser2 = regk_pinmux_yes;
break;
case pinmux_ser3:
- ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
hwprot.ser3 = regk_pinmux_yes;
break;
case pinmux_sser0:
- ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.sser0 = regk_pinmux_yes;
break;
case pinmux_sser1:
- ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
hwprot.sser1 = regk_pinmux_yes;
break;
case pinmux_ata0:
- ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
hwprot.ata0 = regk_pinmux_yes;
break;
case pinmux_ata1:
- ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
hwprot.ata1 = regk_pinmux_yes;
break;
case pinmux_ata2:
- ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata3:
- ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata:
- ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
- ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
+ ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
hwprot.ata = regk_pinmux_yes;
break;
case pinmux_eth1:
- ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
hwprot.eth1 = regk_pinmux_yes;
hwprot.eth1_mgm = regk_pinmux_yes;
break;
case pinmux_timer:
- ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+ ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.timer = regk_pinmux_yes;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
@@ -188,9 +195,19 @@ void crisv32_pinmux_set(int port)
#endif
}
-int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
{
int i;
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port][i] = pinmux_none;
+
+ crisv32_pinmux_set(port);
+ return 0;
+}
+
+int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+{
unsigned long flags;
crisv32_pinmux_init();
@@ -199,11 +216,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
-
- for (i = first_pin; i <= last_pin; i++)
- pins[port][i] = pinmux_none;
-
- crisv32_pinmux_set(port);
+ __crisv32_pinmux_dealloc(port, first_pin, last_pin);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
@@ -226,58 +239,58 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
switch (function) {
case pinmux_ser1:
- ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7);
hwprot.ser1 = regk_pinmux_no;
break;
case pinmux_ser2:
- ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11);
hwprot.ser2 = regk_pinmux_no;
break;
case pinmux_ser3:
- ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15);
hwprot.ser3 = regk_pinmux_no;
break;
case pinmux_sser0:
- ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
- ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3);
+ ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.sser0 = regk_pinmux_no;
break;
case pinmux_sser1:
- ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
hwprot.sser1 = regk_pinmux_no;
break;
case pinmux_ata0:
- ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
- ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7);
+ ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17);
hwprot.ata0 = regk_pinmux_no;
break;
case pinmux_ata1:
- ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
- ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
+ ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17);
hwprot.ata1 = regk_pinmux_no;
break;
case pinmux_ata2:
- ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
- ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15);
+ ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata3:
- ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
- ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10);
+ ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata:
- ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
- ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
+ ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15);
+ ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15);
hwprot.ata = regk_pinmux_no;
break;
case pinmux_eth1:
- ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
+ ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17);
hwprot.eth1 = regk_pinmux_no;
hwprot.eth1_mgm = regk_pinmux_no;
break;
case pinmux_timer:
- ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
+ ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.timer = regk_pinmux_no;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
@@ -293,7 +306,8 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
return ret;
}
-void crisv32_pinmux_dump(void)
+#ifdef DEBUG
+static void crisv32_pinmux_dump(void)
{
int i, j;
@@ -305,5 +319,5 @@ void crisv32_pinmux_dump(void)
printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]);
}
}
-
+#endif
__initcall(crisv32_pinmux_init);
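
The rework above is the classic split into a lock-free __helper plus a thin locked wrapper, so that crisv32_pinmux_alloc_fixed() can compose several pin-range updates under a single acquisition of pinmux_lock. The shape of the pattern, sketched with placeholder names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Caller must hold example_lock; composes with other __helpers. */
    static int __example_op(int arg)
    {
        /* ... touch shared state indexed by arg ... */
        (void)arg;
        return 0;
    }

    /* Exported entry point: takes the lock once around the helper. */
    int example_op(int arg)
    {
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&example_lock, flags);
        ret = __example_op(arg);
        spin_unlock_irqrestore(&example_lock, flags);
        return ret;
    }

    /* A compound operation reuses the helpers under one acquisition,
     * combining results the way the pinmux code does with ret |=. */
    int example_compound_op(void)
    {
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&example_lock, flags);
        ret = __example_op(0);
        ret |= __example_op(1);
        spin_unlock_irqrestore(&example_lock, flags);
        return ret;
    }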
diff --git a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
index c2b3036779df..09bf0c90d2d3 100644
--- a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
+++ b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
@@ -28,11 +28,9 @@ enum fixed_function {
pinmux_timer
};
-int crisv32_pinmux_init(void);
int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode);
int crisv32_pinmux_alloc_fixed(enum fixed_function function);
int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin);
int crisv32_pinmux_dealloc_fixed(enum fixed_function function);
-void crisv32_pinmux_dump(void);
#endif
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d5f124832fd1..889f2de050a3 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,8 +1,4 @@
-header-y += arch-v10/
-header-y += arch-v32/
-
-
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 7d47b366ad82..01f66b8f15e5 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,8 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += arch-v10/
-header-y += arch-v32/
+header-y += ../arch-v10/arch/
+header-y += ../arch-v32/arch/
header-y += auxvec.h
header-y += bitsperlong.h
header-y += byteorder.h
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 5868cee20ebd..3908b942fd4c 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -47,16 +47,16 @@ EXPORT_SYMBOL(__negdi2);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-/* Userspace access functions */
-EXPORT_SYMBOL(__copy_user_zeroing);
-EXPORT_SYMBOL(__copy_user);
-
#undef memcpy
#undef memset
extern void * memset(void *, int, __kernel_size_t);
extern void * memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
+#ifdef CONFIG_ETRAX_ARCH_V32
+#undef strcmp
+EXPORT_SYMBOL(strcmp);
+#endif
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Fast timer functions */
@@ -66,3 +66,4 @@ EXPORT_SYMBOL(del_fast_timer);
EXPORT_SYMBOL(schedule_usleep);
#endif
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index 0ffda73734f5..da4c72401e27 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -14,6 +14,10 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+#endif
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -34,25 +38,24 @@ static int kstack_depth_to_print = 24;
void (*nmi_handler)(struct pt_regs *);
-void
-show_trace(unsigned long *stack)
+void show_trace(unsigned long *stack)
{
unsigned long addr, module_start, module_end;
extern char _stext, _etext;
int i;
- printk("\nCall Trace: ");
+ pr_err("\nCall Trace: ");
i = 1;
module_start = VMALLOC_START;
module_end = VMALLOC_END;
- while (((long)stack & (THREAD_SIZE-1)) != 0) {
+ while (((long)stack & (THREAD_SIZE - 1)) != 0) {
if (__get_user(addr, stack)) {
/* This message matches "failing address" marked
s390 in ksymoops, so lines containing it will
not be filtered out by ksymoops. */
- printk("Failing address 0x%lx\n", (unsigned long)stack);
+ pr_err("Failing address 0x%lx\n", (unsigned long)stack);
break;
}
stack++;
@@ -68,10 +71,14 @@ show_trace(unsigned long *stack)
if (((addr >= (unsigned long)&_stext) &&
(addr <= (unsigned long)&_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
+#ifdef CONFIG_KALLSYMS
+ print_ip_sym(addr);
+#else
if (i && ((i % 8) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
+ pr_err("\n ");
+ pr_err("[<%08lx>] ", addr);
i++;
+#endif
}
}
}
@@ -111,21 +118,21 @@ show_stack(struct task_struct *task, unsigned long *sp)
stack = sp;
- printk("\nStack from %08lx:\n ", (unsigned long)stack);
+ pr_err("\nStack from %08lx:\n ", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((long)stack & (THREAD_SIZE-1)) == 0)
break;
if (i && ((i % 8) == 0))
- printk("\n ");
+ pr_err("\n ");
if (__get_user(addr, stack)) {
/* This message matches "failing address" marked
s390 in ksymoops, so lines containing it will
not be filtered out by ksymoops. */
- printk("Failing address 0x%lx\n", (unsigned long)stack);
+ pr_err("Failing address 0x%lx\n", (unsigned long)stack);
break;
}
stack++;
- printk("%08lx ", addr);
+ pr_err("%08lx ", addr);
}
show_trace(sp);
}
@@ -139,33 +146,32 @@ show_stack(void)
unsigned long *sp = (unsigned long *)rdusp();
int i;
- printk("Stack dump [0x%08lx]:\n", (unsigned long)sp);
+ pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp);
for (i = 0; i < 16; i++)
- printk("sp + %d: 0x%08lx\n", i*4, sp[i]);
+ pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]);
return 0;
}
#endif
-void
-set_nmi_handler(void (*handler)(struct pt_regs *))
+void set_nmi_handler(void (*handler)(struct pt_regs *))
{
nmi_handler = handler;
arch_enable_nmi();
}
#ifdef CONFIG_DEBUG_NMI_OOPS
-void
-oops_nmi_handler(struct pt_regs *regs)
+void oops_nmi_handler(struct pt_regs *regs)
{
stop_watchdog();
oops_in_progress = 1;
- printk("NMI!\n");
+ pr_err("NMI!\n");
show_registers(regs);
oops_in_progress = 0;
+ oops_exit();
+ pr_err("\n"); /* Flush mtdoops. */
}
-static int __init
-oops_nmi_register(void)
+static int __init oops_nmi_register(void)
{
set_nmi_handler(oops_nmi_handler);
return 0;
@@ -180,8 +186,7 @@ __initcall(oops_nmi_register);
* similar to an Oops dump, and if the kernel is configured to be a nice
* doggy, then halt instead of reboot.
*/
-void
-watchdog_bite_hook(struct pt_regs *regs)
+void watchdog_bite_hook(struct pt_regs *regs)
{
#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
local_irq_disable();
@@ -196,8 +201,7 @@ watchdog_bite_hook(struct pt_regs *regs)
}
/* This is normally the Oops function. */
-void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs))
return;
@@ -211,13 +215,17 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
stop_watchdog();
#endif
+ oops_enter();
handle_BUG(regs);
- printk("%s: %04lx\n", str, err & 0xffff);
+ pr_err("Linux %s %s\n", utsname()->release, utsname()->version);
+ pr_err("%s: %04lx\n", str, err & 0xffff);
show_registers(regs);
+ oops_exit();
oops_in_progress = 0;
+ pr_err("\n"); /* Flush mtdoops. */
#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
reset_watchdog();
@@ -225,8 +233,7 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
do_exit(SIGSEGV);
}
-void __init
-trap_init(void)
+void __init trap_init(void)
{
/* Nothing needs to be done */
}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index c81af5bd9167..1e7fd45b60f8 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -11,13 +11,15 @@
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/kcore.h>
#include <asm/tlb.h>
#include <asm/sections.h>
unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
-void __init
-mem_init(void)
+void __init mem_init(void)
{
BUG_ON(!mem_map);
@@ -31,10 +33,36 @@ mem_init(void)
mem_init_print_info(NULL);
}
-/* free the pages occupied by initialization code */
+/* Free a range of init pages. Virtual addresses. */
-void
-free_initmem(void)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/* Free the pages occupied by initialization code. */
+
+void free_initmem(void)
{
free_initmem_default(-1);
}
+
+/* Free the pages occupied by initrd code. */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory",
+ start,
+ end);
+}
+#endif
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index f9ca44bdea20..80fdb995a8ce 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -76,10 +76,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
* Must be freed with iounmap.
*/
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}
+EXPORT_SYMBOL(ioremap_nocache);
void iounmap(volatile void __iomem *addr)
{
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
index 263511719a4a..69952c184207 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -1,7 +1,7 @@
/*
* Cache definitions for the Hexagon architecture
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,8 @@
#define L1_CACHE_SHIFT (5)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index 49e0896ec240..b86f9f300e94 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -21,10 +21,7 @@
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <asm/string.h>
-#include <asm-generic/cacheflush.h>
+#include <linux/mm_types.h>
/* Cache flushing:
*
@@ -41,6 +38,20 @@
#define LINESIZE 32
#define LINEBITS 5
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
/*
* Flush Dcache range through current map.
*/
@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
/*
* Flush Icache range through current map.
*/
-#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end);
/*
@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
/* generic_ptrace_pokedata doesn't wind up here, does it? */
}
-#undef copy_to_user_page
-static inline void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page,
- unsigned long vaddr,
- void *dst, void *src, int len)
-{
- memcpy(dst, src, len);
- if (vma->vm_flags & VM_EXEC) {
- flush_icache_range((unsigned long) dst,
- (unsigned long) dst + len);
- }
-}
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 70298996e9b2..66f5e9a61efc 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -24,14 +24,9 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <asm/string.h>
-#include <asm/mem-layout.h>
#include <asm/iomap.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
/*
* We don't have PCI yet.
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 0e7c1dbb37b2..6981949f5df3 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -19,6 +19,7 @@
*/
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 7858663352b9..110dab152f82 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -1,7 +1,7 @@
/*
* Kernel traps/events for Hexagon processor
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -423,7 +423,7 @@ void do_trap0(struct pt_regs *regs)
*/
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) pt_elr(regs);
- send_sig_info(SIGTRAP, &info, current);
+ force_sig_info(SIGTRAP, &info, current);
} else {
#ifdef CONFIG_KGDB
kgdb_handle_exception(pt_cause(regs), SIGTRAP,
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 44d8c47bae2f..5f268c1071b3 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
/*
* Linker script for Hexagon kernel
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,7 +59,7 @@ SECTIONS
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
- RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
+ RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
RO_DATA_SECTION(PAGE_SIZE)
_edata = .;
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
index 0c76c802e31c..a7c6d827d8b6 100644
--- a/arch/hexagon/mm/cache.c
+++ b/arch/hexagon/mm/cache.c
@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
local_irq_restore(flags);
mb();
}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ flush_icache_range((unsigned long) dst,
+ (unsigned long) dst + len);
+ }
+}
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 5905fd5f97f6..d27d67224046 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 14aa1c58912b..0ec484d2dcbc 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -35,8 +35,8 @@ extern void *per_cpu_init(void);
/*
* Be extremely careful when taking the address of this variable! Due to virtual
- * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
- * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
* more efficient.
*/
#define __ia64_per_cpu_var(var) (*({ \
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
new file mode 100644
index 000000000000..d2f99ca1e3a6
--- /dev/null
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_CPUIDLE_H
+#define _ASM_POWERPC_CPUIDLE_H
+
+#ifdef CONFIG_PPC_POWERNV
+/* Used in powernv idle state management */
+#define PNV_THREAD_RUNNING 0
+#define PNV_THREAD_NAP 1
+#define PNV_THREAD_SLEEP 2
+#define PNV_THREAD_WINKLE 3
+#define PNV_CORE_IDLE_LOCK_BIT 0x100
+#define PNV_CORE_IDLE_THREAD_BITS 0x0FF
+
+#ifndef __ASSEMBLY__
+extern u32 pnv_fastsleep_workaround_at_entry[];
+extern u32 pnv_fastsleep_workaround_at_exit[];
+#endif
+
+#endif
+
+#endif
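
The new per-core idle word packs a thread bitmask into bits 0-7 and a lock bit at 0x100, taken around the fastsleep workaround and state restore. A stand-alone sketch of how the constants decompose the word (helper names are illustrative; the PNV_* values mirror the header above):

    #include <stdbool.h>
    #include <stdint.h>

    #define PNV_CORE_IDLE_LOCK_BIT      0x100
    #define PNV_CORE_IDLE_THREAD_BITS   0x0FF

    static bool core_state_locked(uint32_t state)
    {
        return state & PNV_CORE_IDLE_LOCK_BIT;
    }

    /* True when clearing this thread's bit leaves no thread bits set,
     * i.e. this is the last thread of the core to enter (or the first
     * to leave) a deep idle state. */
    static bool core_last_thread(uint32_t state, uint32_t my_thread_mask)
    {
        return ((state & ~my_thread_mask) & PNV_CORE_IDLE_THREAD_BITS) == 0;
    }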
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 5cd8d2fddba9..eb95b675109b 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -56,6 +56,14 @@ struct opal_sg_list {
#define OPAL_HARDWARE_FROZEN -13
#define OPAL_WRONG_STATE -14
#define OPAL_ASYNC_COMPLETION -15
+#define OPAL_I2C_TIMEOUT -17
+#define OPAL_I2C_INVALID_CMD -18
+#define OPAL_I2C_LBUS_PARITY -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS -21
+#define OPAL_I2C_ARBT_LOST -22
+#define OPAL_I2C_NACK_RCVD -23
+#define OPAL_I2C_STOP_ERR -24
/* API Tokens (in r0) */
#define OPAL_INVALID_CALL -1
@@ -152,12 +160,25 @@ struct opal_sg_list {
#define OPAL_PCI_ERR_INJECT 96
#define OPAL_PCI_EEH_FREEZE_SET 97
#define OPAL_HANDLE_HMI 98
+#define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
#define OPAL_REGISTER_DUMP_REGION 101
#define OPAL_UNREGISTER_DUMP_REGION 102
#define OPAL_WRITE_TPO 103
#define OPAL_READ_TPO 104
#define OPAL_IPMI_SEND 107
#define OPAL_IPMI_RECV 108
+#define OPAL_I2C_REQUEST 109
+
+/* Device tree flags */
+
+/* Flags set in power-mgmt nodes in device tree if
+ * respective idle states are supported in the platform.
+ */
+#define OPAL_PM_NAP_ENABLED 0x00010000
+#define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000
#ifndef __ASSEMBLY__
@@ -712,6 +733,24 @@ typedef struct oppanel_line {
uint64_t line_len;
} oppanel_line_t;
+/* OPAL I2C request */
+struct opal_i2c_request {
+ uint8_t type;
+#define OPAL_I2C_RAW_READ 0
+#define OPAL_I2C_RAW_WRITE 1
+#define OPAL_I2C_SM_READ 2
+#define OPAL_I2C_SM_WRITE 3
+ uint8_t flags;
+#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
+ uint8_t subaddr_sz; /* Max 4 */
+ uint8_t reserved;
+ __be16 addr; /* 7 or 10 bit address */
+ __be16 reserved2;
+ __be32 subaddr; /* Sub-address if any */
+ __be32 size; /* Data size */
+ __be64 buffer_ra; /* Buffer real address */
+};
+
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
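
The request descriptor above is handed to firmware by real address, with all multi-byte fields big-endian; the opal_i2c_request() wrapper is declared further down. A hedged kernel-context sketch of filling it for a one-byte sub-addressed read (bus id and async token are placeholders, error handling and async completion elided):

    /* Sketch only: issue an smbus-style read through OPAL. */
    static int64_t example_opal_i2c_read(uint32_t bus_id, uint16_t addr,
                                         uint32_t subaddr, void *buf,
                                         uint32_t len, int async_token)
    {
        struct opal_i2c_request req = {
            .type       = OPAL_I2C_SM_READ,
            .addr       = cpu_to_be16(addr),   /* 7-bit address */
            .subaddr_sz = 1,                   /* one sub-address byte */
            .subaddr    = cpu_to_be32(subaddr),
            .size       = cpu_to_be32(len),
            .buffer_ra  = cpu_to_be64(__pa(buf)),
        };

        return opal_i2c_request(async_token, bus_id, &req);
    }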
@@ -876,11 +915,14 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t msg_len);
int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t *msg_len);
+int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
+ struct opal_i2c_request *oreq);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 24a386cbb928..e5f22c6c4bf9 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -152,6 +152,16 @@ struct paca_struct {
u64 tm_scratch; /* TM scratch area for reclaim */
#endif
+#ifdef CONFIG_PPC_POWERNV
+ /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
+ u32 *core_idle_state_ptr;
+ u8 thread_idle_state; /* PNV_THREAD_RUNNING/NAP/SLEEP */
+ /* Mask to indicate thread id in core */
+ u8 thread_mask;
+ /* Mask to denote subcore sibling threads */
+ u8 subcore_sibling_mask;
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
/* Exclusive emergency stack pointer for machine check exception. */
void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1a5287759fc8..03cd858a401c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -194,6 +194,7 @@
#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
+#define PPC_INST_WINKLE 0x4c0003e4
/* A2 specific instructions */
#define PPC_INST_ERATWE 0x7c0001a6
@@ -375,6 +376,7 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)
/* BHRB instructions */
#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 29c3798cf800..bf117d8fb45f 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -452,7 +452,8 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern unsigned long power7_nap(int check_irq);
-extern void power7_sleep(void);
+extern unsigned long power7_sleep(void);
+extern unsigned long power7_winkle(void);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c998279bd85b..1c874fb533bb 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -118,8 +118,10 @@
#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
#ifdef __BIG_ENDIAN__
#define MSR_ __MSR
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV)
#else
#define MSR_ (__MSR | MSR_LE)
+#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE)
#endif
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
@@ -371,6 +373,7 @@
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
#define SPRN_PPR 0x380 /* SMT Thread status Register */
+#define SPRN_TSCR 0x399 /* Thread Switch Control Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Regsiter */
@@ -728,6 +731,7 @@
#define SPRN_BESCR 806 /* Branch event status and control register */
#define BESCR_GE 0x8000000000000000ULL /* Global Enable */
#define SPRN_WORT 895 /* Workload optimization register - thread */
+#define SPRN_WORC 863 /* Workload optimization register - core */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 6240698fee9a..ff21b7a2f0cc 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -90,6 +90,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
static inline int syscall_get_arch(void)
{
- return is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+ int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+#ifdef __LITTLE_ENDIAN__
+ arch |= __AUDIT_ARCH_LE;
+#endif
+ return arch;
}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 9485b43a7c00..a0c071d24e0e 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -284,7 +284,7 @@ do { \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
#endif /* __powerpc64__ */
@@ -297,7 +297,7 @@ do { \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -308,7 +308,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 24d78e1871c9..e624f9646350 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -726,5 +726,16 @@ int main(void)
arch.timing_last_enter.tv32.tbl));
#endif
+#ifdef CONFIG_PPC_POWERNV
+ DEFINE(PACA_CORE_IDLE_STATE_PTR,
+ offsetof(struct paca_struct, core_idle_state_ptr));
+ DEFINE(PACA_THREAD_IDLE_STATE,
+ offsetof(struct paca_struct, thread_idle_state));
+ DEFINE(PACA_THREAD_MASK,
+ offsetof(struct paca_struct, thread_mask));
+ DEFINE(PACA_SUBCORE_SIBLING_MASK,
+ offsetof(struct paca_struct, subcore_sibling_mask));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index db08382e19f1..c2df8150bd7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -15,6 +15,7 @@
#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
+#include <asm/cpuidle.h>
/*
* We layout physical memory as follows:
@@ -101,23 +102,34 @@ system_reset_pSeries:
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
/* Running native on arch 2.06 or later, check if we are
- * waking up from nap. We only handle no state loss and
- * supervisor state loss. We do -not- handle hypervisor
- * state loss at this time.
+ * waking up from nap/sleep/winkle.
*/
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
beq 9f
- /* waking up from powersave (nap) state */
- cmpwi cr1,r13,2
- /* Total loss of HV state is fatal, we could try to use the
- * PIR to locate a PACA, then use an emergency stack etc...
- * OPAL v3 based powernv platforms have new idle states
- * which fall in this catagory.
+ cmpwi cr3,r13,2
+
+ /*
+ * Check if the last bit of HSPRG0 is set. This indicates whether we
+ * are waking up from winkle.
*/
- bgt cr1,8f
GET_PACA(r13)
+ clrldi r5,r13,63
+ clrrdi r13,r13,1
+ cmpwi cr4,r5,1
+ mtspr SPRN_HSPRG0,r13
+
+ lbz r0,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr2,r0,PNV_THREAD_NAP
+ bgt cr2,8f /* Either sleep or Winkle */
+
+ /* Waking up from nap should not cause hypervisor state loss */
+ bgt cr3,.
+
+ /* Waking up from nap */
+ li r0,PNV_THREAD_RUNNING
+ stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
li r0,KVM_HWTHREAD_IN_KERNEL
@@ -133,7 +145,7 @@ BEGIN_FTR_SECTION
/* Return SRR1 from power7_nap() */
mfspr r3,SPRN_SRR1
- beq cr1,2f
+ beq cr3,2f
b power7_wakeup_noloss
2: b power7_wakeup_loss
@@ -1382,6 +1394,7 @@ machine_check_handle_early:
MACHINE_CHECK_HANDLER_WINDUP
GET_PACA(r13)
ld r1,PACAR1(r13)
+ li r3,PNV_THREAD_NAP
b power7_enter_nap_mode
4:
#endif
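
The HSPRG0 dance above is plain low-bit pointer tagging: the PACA is aligned, so bit 0 of its address is free to record "we went through winkle" across the state loss. The same trick in C, assuming an aligned object (names illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Stash a one-bit flag in the low bit of an aligned pointer, as the
     * winkle path does with the PACA address in HSPRG0. */
    static inline uintptr_t tag_winkle(void *paca)
    {
        assert(((uintptr_t)paca & 1) == 0); /* alignment frees bit 0 */
        return (uintptr_t)paca | 1;
    }

    static inline void *untag(uintptr_t v, int *from_winkle)
    {
        *from_winkle = v & 1;                   /* clrldi r5,r13,63 */
        return (void *)(v & ~(uintptr_t)1);     /* clrrdi r13,r13,1 */
    }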
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 18c0687e5ab3..05adc8bbdef8 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -18,9 +18,25 @@
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
+#include <asm/cpuidle.h>
+#include <asm/mmu-hash64.h>
#undef DEBUG
+/*
+ * Use unused space in the interrupt stack to save and restore
+ * registers for winkle support.
+ */
+#define _SDR1 GPR3
+#define _RPR GPR4
+#define _SPURR GPR5
+#define _PURR GPR6
+#define _TSCR GPR7
+#define _DSCR GPR8
+#define _AMOR GPR9
+#define _WORT GPR10
+#define _WORC GPR11
+
/* Idle state entry routines */
#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
@@ -37,8 +53,7 @@
/*
* Pass requested state in r3:
- * 0 - nap
- * 1 - sleep
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
*
* To check IRQ_HAPPENED in r4
* 0 - don't check
@@ -101,18 +116,105 @@ _GLOBAL(power7_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-_GLOBAL(power7_enter_nap_mode)
+ /*
+ * Go to real mode to do the nap, as required by the architecture.
+ * Also, we need to be in real mode before setting hwthread_state,
+ * because as soon as we do that, another thread can switch
+ * the MMU context to the guest.
+ */
+ LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+ li r6, MSR_RI
+ andc r6, r9, r6
+ LOAD_REG_ADDR(r7, power7_enter_nap_mode)
+ mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
+ mtspr SPRN_SRR0, r7
+ mtspr SPRN_SRR1, r5
+ rfid
+
+ .globl power7_enter_nap_mode
+power7_enter_nap_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
- cmpwi cr0,r3,1
- beq 2f
+ stb r3,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr3,r3,PNV_THREAD_SLEEP
+ bge cr3,2f
IDLE_STATE_ENTER_SEQ(PPC_NAP)
/* No return */
-2: IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
- /* No return */
+2:
+ /* Sleep or winkle */
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop1:
+ lwarx r15,0,r14
+ andc r15,r15,r7 /* Clear thread bit */
+
+ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+
+/*
+ * If cr0 = 0, then current thread is the last thread of the core entering
+ * sleep. Last thread needs to execute the hardware bug workaround code if
+ * required by the platform.
+ * Make the workaround call unconditionally here. The below branch call is
+ * patched out when the idle states are discovered if the platform does not
+ * require it.
+ */
+.global pnv_fastsleep_workaround_at_entry
+pnv_fastsleep_workaround_at_entry:
+ beq fastsleep_workaround_at_entry
+
+ stwcx. r15,0,r14
+ bne- lwarx_loop1
+ isync
+
+common_enter: /* common code for all the threads entering sleep or winkle */
+ bgt cr3,enter_winkle
+ IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+
+fastsleep_workaround_at_entry:
+ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ stwcx. r15,0,r14
+ bne- lwarx_loop1
+ isync
+
+ /* Fast sleep workaround */
+ li r3,1
+ li r4,1
+ li r0,OPAL_CONFIG_CPU_IDLE_STATE
+ bl opal_call_realmode
+
+ /* Clear Lock bit */
+ li r0,0
+ lwsync
+ stw r0,0(r14)
+ b common_enter
+
+enter_winkle:
+ /*
+ * Note that all registers, whether per-core, per-subcore or per-thread,
+ * are saved here, since any thread in the core might wake up first.
+ */
+ mfspr r3,SPRN_SDR1
+ std r3,_SDR1(r1)
+ mfspr r3,SPRN_RPR
+ std r3,_RPR(r1)
+ mfspr r3,SPRN_SPURR
+ std r3,_SPURR(r1)
+ mfspr r3,SPRN_PURR
+ std r3,_PURR(r1)
+ mfspr r3,SPRN_TSCR
+ std r3,_TSCR(r1)
+ mfspr r3,SPRN_DSCR
+ std r3,_DSCR(r1)
+ mfspr r3,SPRN_AMOR
+ std r3,_AMOR(r1)
+ mfspr r3,SPRN_WORT
+ std r3,_WORT(r1)
+ mfspr r3,SPRN_WORC
+ std r3,_WORC(r1)
+ IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
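
The lwarx/stwcx. loops here are an open-coded atomic update of the shared core idle word: clear this thread's bit, and if that leaves no thread bits set, take the lock bit so the fastsleep workaround can run. A C sketch of the same update using compiler atomics (not the kernel's code; constants mirror asm/cpuidle.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define PNV_CORE_IDLE_LOCK_BIT      0x100
    #define PNV_CORE_IDLE_THREAD_BITS   0x0FF

    /* Returns true if this thread was the last one down; the caller
     * then runs the workaround and clears the lock bit. */
    static bool enter_idle(uint32_t *core_state, uint32_t thread_mask,
                           bool need_workaround)
    {
        uint32_t oldv, newv;
        bool last;

        do {
            oldv = __atomic_load_n(core_state, __ATOMIC_RELAXED);
            newv = oldv & ~thread_mask;          /* clear thread bit */
            last = (newv & PNV_CORE_IDLE_THREAD_BITS) == 0;
            if (last && need_workaround)
                newv |= PNV_CORE_IDLE_LOCK_BIT;  /* take the lock */
        } while (!__atomic_compare_exchange_n(core_state, &oldv, newv,
                                              false, __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
        return last;
    }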
_GLOBAL(power7_idle)
/* Now check if user or arch enabled NAP mode */
@@ -125,48 +227,21 @@ _GLOBAL(power7_idle)
_GLOBAL(power7_nap)
mr r4,r3
- li r3,0
+ li r3,PNV_THREAD_NAP
b power7_powersave_common
/* No return */
_GLOBAL(power7_sleep)
- li r3,1
+ li r3,PNV_THREAD_SLEEP
li r4,1
b power7_powersave_common
/* No return */
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode from reset vector. It handles endianess.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r3 - opal token
- */
-opal_call_realmode:
- mflr r12
- std r12,_LINK(r1)
- ld r2,PACATOC(r13)
- /* Set opal return address */
- LOAD_REG_ADDR(r0,return_from_opal_call)
- mtlr r0
- /* Handle endian-ness */
- li r0,MSR_LE
- mfmsr r12
- andc r12,r12,r0
- mtspr SPRN_HSRR1,r12
- mr r0,r3 /* Move opal token to r0 */
- LOAD_REG_ADDR(r11,opal)
- ld r12,8(r11)
- ld r2,0(r11)
- mtspr SPRN_HSRR0,r12
- hrfid
-
-return_from_opal_call:
- FIXUP_ENDIAN
- ld r0,_LINK(r1)
- mtlr r0
- blr
+_GLOBAL(power7_winkle)
+ li r3,PNV_THREAD_WINKLE
+ li r4,1
+ b power7_powersave_common
+ /* No return */
#define CHECK_HMI_INTERRUPT \
mfspr r0,SPRN_SRR1; \
@@ -181,7 +256,7 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
ld r2,PACATOC(r13); \
ld r1,PACAR1(r13); \
std r3,ORIG_GPR3(r1); /* Save original r3 */ \
- li r3,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
+ li r0,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
bl opal_call_realmode; \
ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \
20: nop;
@@ -190,16 +265,190 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
_GLOBAL(power7_wakeup_tb_loss)
ld r2,PACATOC(r13);
ld r1,PACAR1(r13)
+ /*
+ * Before entering any idle state, the NVGPRs are saved on the stack
+ * and restored before switching back to process context. Hence,
+ * until they are restored, they are free to be used.
+ *
+ * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
+ * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
+ * wakeup reason if we branch to kvm_start_guest.
+ */
+ mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop2:
+ lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ /*
+ * The lock bit is set in one of two cases:
+ * a. In the sleep/winkle entry path, the last thread is executing
+ * the fastsleep workaround code.
+ * b. In the wakeup path, another thread is executing the fastsleep
+ * workaround undo code, resyncing the timebase or restoring context.
+ * In either case, loop until the lock bit is cleared.
+ */
+ bne core_idle_lock_held
+
+ cmpwi cr2,r15,0
+ lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+ and r4,r4,r15
+ cmpwi cr1,r4,0 /* Check if first in subcore */
+
+ /*
+ * At this stage:
+ * cr1 - 0b0100 if first thread to wake up in the subcore
+ * cr2 - 0b0100 if first thread to wake up in the core
+ * cr3 - 0b0010 if waking up from sleep or winkle
+ * cr4 - 0b0100 if waking up from winkle
+ */
+
+ or r15,r15,r7 /* Set thread bit */
+
+ beq cr1,first_thread_in_subcore
+
+ /* Not first thread in subcore to wake up */
+ stwcx. r15,0,r14
+ bne- lwarx_loop2
+ isync
+ b common_exit
+
+core_idle_lock_held:
+ HMT_LOW
+core_idle_lock_loop:
+ lwz r15,0(r14)
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_loop
+ HMT_MEDIUM
+ b lwarx_loop2
+
+first_thread_in_subcore:
+ /* First thread in subcore to wakeup */
+ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ stwcx. r15,0,r14
+ bne- lwarx_loop2
+ isync
+
+ /*
+ * If waking up from sleep, subcore state is not lost. Hence
+ * skip the subcore state restore.
+ */
+ bne cr4,subcore_state_restored
+
+ /* Restore per-subcore state */
+ ld r4,_SDR1(r1)
+ mtspr SPRN_SDR1,r4
+ ld r4,_RPR(r1)
+ mtspr SPRN_RPR,r4
+ ld r4,_AMOR(r1)
+ mtspr SPRN_AMOR,r4
+
+subcore_state_restored:
+ /*
+ * Check if the thread is also the first thread in the core. If not,
+ * skip to clear_lock.
+ */
+ bne cr2,clear_lock
+
+first_thread_in_core:
+
+ /*
+ * First thread in the core waking up from fastsleep. It needs to
+ * call the fastsleep workaround code if the platform requires it.
+ * Call it unconditionally here. The branch instruction below will
+ * be patched out when the idle states are discovered if the platform
+ * does not require the workaround.
+ */
+.global pnv_fastsleep_workaround_at_exit
+pnv_fastsleep_workaround_at_exit:
+ b fastsleep_workaround_at_exit
+
+timebase_resync:
+ /*
+ * Do the timebase resync if we are waking up from sleep. Use the cr3
+ * value set in exceptions-64s.S.
+ */
+ ble cr3,clear_lock
/* Time base re-sync */
- li r3,OPAL_RESYNC_TIMEBASE
+ li r0,OPAL_RESYNC_TIMEBASE
bl opal_call_realmode;
-
/* TODO: Check r3 for failure */
+ /*
+ * If waking up from sleep, per-core state is not lost; skip to
+ * clear_lock.
+ */
+ bne cr4,clear_lock
+
+ /* Restore per core state */
+ ld r4,_TSCR(r1)
+ mtspr SPRN_TSCR,r4
+ ld r4,_WORC(r1)
+ mtspr SPRN_WORC,r4
+
+clear_lock:
+ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+ lwsync
+ stw r15,0(r14)
+
+common_exit:
+ /*
+ * Common to all threads.
+ *
+ * If waking up from sleep, hypervisor state is not lost. Hence
+ * skip hypervisor state restore.
+ */
+ bne cr4,hypervisor_state_restored
+
+ /* Waking up from winkle */
+
+ /* Restore per thread state */
+ bl __restore_cpu_power8
+
+ /* Restore SLB from PACA */
+ ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ li r3, SLBSHADOW_SAVEAREA
+ LDX_BE r5, r8, r3
+ addi r3, r3, 8
+ LDX_BE r6, r8, r3
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+
+ ld r4,_SPURR(r1)
+ mtspr SPRN_SPURR,r4
+ ld r4,_PURR(r1)
+ mtspr SPRN_PURR,r4
+ ld r4,_DSCR(r1)
+ mtspr SPRN_DSCR,r4
+ ld r4,_WORT(r1)
+ mtspr SPRN_WORT,r4
+
+hypervisor_state_restored:
+
+ li r5,PNV_THREAD_RUNNING
+ stb r5,PACA_THREAD_IDLE_STATE(r13)
+
+ mtspr SPRN_SRR1,r16
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beq 6f
+ b kvm_start_guest
+6:
+#endif
+
REST_NVGPRS(r1)
REST_GPR(2, r1)
ld r3,_CCR(r1)
@@ -212,6 +461,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
mtspr SPRN_SRR0,r5
rfid
+fastsleep_workaround_at_exit:
+ li r3,1
+ li r4,0
+ li r0,OPAL_CONFIG_CPU_IDLE_STATE
+ bl opal_call_realmode
+ b timebase_resync
+
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
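
The entry and wakeup paths above implement a small per-core state machine: each hardware thread owns one bit of core_idle_state, and a lock bit serializes the save and restore of shared core state. A rough user-space C sketch of the same protocol, with GCC atomic builtins standing in for the lwarx/stwcx. loops (illustrative names, not kernel code):

#include <stdint.h>
#include <stdbool.h>

#define IDLE_THREAD_BITS 0xffu   /* one bit per hardware thread */
#define IDLE_LOCK_BIT    0x100u  /* guards core state save/restore */

/* Clear our thread bit; returns true if we are the last thread down. */
static bool idle_enter(uint32_t *core_state, uint32_t thread_mask)
{
	uint32_t old = __atomic_load_n(core_state, __ATOMIC_RELAXED);
	uint32_t new;

	do {
		new = old & ~thread_mask;
	} while (!__atomic_compare_exchange_n(core_state, &old, new, false,
					      __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
	return (new & IDLE_THREAD_BITS) == 0;
}

/* Set our thread bit on wakeup; spin while another thread holds the lock.
 * Returns true if we are the first thread up and must restore core state
 * (taking IDLE_LOCK_BIT while doing so). */
static bool idle_exit(uint32_t *core_state, uint32_t thread_mask)
{
	uint32_t old = __atomic_load_n(core_state, __ATOMIC_RELAXED);
	uint32_t new;

	do {
		while (old & IDLE_LOCK_BIT)
			old = __atomic_load_n(core_state, __ATOMIC_RELAXED);
		new = old | thread_mask;
	} while (!__atomic_compare_exchange_n(core_state, &old, new, false,
					      __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
	return (old & IDLE_THREAD_BITS) == 0;
}
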
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8b2d2dc8ef10..8ec017cb4446 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,7 +700,6 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
- cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -739,6 +738,14 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
+ /*
+ * CPU must be marked active and online before we signal back to the
+ * master, because the scheduler needs to see the cpu_online and
+ * cpu_active bits set.
+ */
+ smp_wmb();
+ cpu_callin_map[cpu] = 1;
+
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
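
The smp_wmb() added above is one half of a publish/consume pairing: the secondary must make cpu_online/cpu_active visible before setting cpu_callin_map, and the master's read side relies on the matching order. A minimal C11 sketch of the pattern, assuming hypothetical flag variables (not the actual boot code):

#include <stdatomic.h>

static int cpu_online_flag, cpu_active_flag;
static atomic_int callin_flag;

/* Runs on the booting CPU: publish state, then raise the flag. */
void secondary_announce(void)
{
	cpu_online_flag = 1;
	cpu_active_flag = 1;
	/* release store plays the role of smp_wmb() in the hunk above */
	atomic_store_explicit(&callin_flag, 1, memory_order_release);
}

/* Runs on the boot CPU: see the flag, then safely read the state. */
void master_wait(void)
{
	while (!atomic_load_explicit(&callin_flag, memory_order_acquire))
		;
	/* cpu_online_flag and cpu_active_flag are now guaranteed visible */
}
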
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index dba34088da28..f162d0b8eea3 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -177,7 +177,7 @@ static ssize_t _name##_show(struct device *dev, \
} \
ret = sprintf(buf, _fmt, _expr); \
e_free: \
- kfree(page); \
+ kmem_cache_free(hv_page_cache, page); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -217,11 +217,14 @@ static bool is_physical_domain(int domain)
domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}
+DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
u16 lpar, u64 *res,
bool success_expected)
{
- unsigned long ret = -ENOMEM;
+ unsigned long ret;
/*
* request_buffer and result_buffer are not required to be 4k aligned,
@@ -243,13 +246,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
- request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
- if (!request_buffer)
- goto out;
+ request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+ result_buffer = (void *)get_cpu_var(hv_24x7_resb);
- result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
- if (!result_buffer)
- goto out_free_request_buffer;
+ memset(request_buffer, 0, 4096);
+ memset(result_buffer, 0, 4096);
*request_buffer = (struct reqb) {
.buf = {
@@ -278,15 +279,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
domain, offset, ix, lpar, ret, ret,
result_buffer->buf.detailed_rc,
result_buffer->buf.failing_request_ix);
- goto out_free_result_buffer;
+ goto out;
}
*res = be64_to_cpu(result_buffer->result);
-out_free_result_buffer:
- kfree(result_buffer);
-out_free_request_buffer:
- kfree(request_buffer);
out:
return ret;
}
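
The hunk above trades per-call kmem_cache allocations for two statically allocated, page-aligned per-CPU buffers, removing an allocation (and its failure path) from every 24x7 counter read. A sketch of the pattern, with hcall_buf and make_hcall() as hypothetical stand-ins for the driver's buffers and hypercall:

#include <linux/percpu.h>
#include <linux/string.h>

/* One page-aligned 4K buffer per CPU, created once rather than per call. */
static DEFINE_PER_CPU(char, hcall_buf[4096]) __aligned(4096);

static long do_hcall_on_this_cpu(void)
{
	void *buf = (void *)get_cpu_var(hcall_buf); /* disables preemption */
	long ret;

	memset(buf, 0, 4096);
	ret = make_hcall(buf);
	put_cpu_var(hcall_buf);                     /* re-enables preemption */
	return ret;
}

Note that get_cpu_var() disables preemption for the duration; a matching put_cpu_var(), or a caller that is already non-preemptible, is needed so the same buffer cannot be handed to two contexts on one CPU.
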
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 0a299be588af..54eca8b3b288 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -158,6 +158,43 @@ opal_tracepoint_return:
blr
#endif
+/*
+ * Make opal call in realmode. This is a generic function to be called
+ * from realmode. It handles endianness.
+ *
+ * r13 - paca pointer
+ * r1 - stack pointer
+ * r0 - opal token
+ */
+_GLOBAL(opal_call_realmode)
+ mflr r12
+ std r12,PPC_LR_STKOFF(r1)
+ ld r2,PACATOC(r13)
+ /* Set opal return address */
+ LOAD_REG_ADDR(r12,return_from_opal_call)
+ mtlr r12
+
+ mfmsr r12
+#ifdef __LITTLE_ENDIAN__
+ /* Handle endian-ness */
+ li r11,MSR_LE
+ andc r12,r12,r11
+#endif
+ mtspr SPRN_HSRR1,r12
+ LOAD_REG_ADDR(r11,opal)
+ ld r12,8(r11)
+ ld r2,0(r11)
+ mtspr SPRN_HSRR0,r12
+ hrfid
+
+return_from_opal_call:
+#ifdef __LITTLE_ENDIAN__
+ FIXUP_ENDIAN
+#endif
+ ld r12,PPC_LR_STKOFF(r1)
+ mtlr r12
+ blr
+
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
@@ -247,6 +284,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
@@ -254,3 +292,4 @@ OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
+OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index cb0b6de79cd4..f10b9ec8c1f5 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -9,8 +9,9 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
+#define pr_fmt(fmt) "opal: " fmt
+#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -625,6 +626,39 @@ static int opal_sysfs_init(void)
return 0;
}
+static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &off, bin_attr->private,
+ bin_attr->size);
+}
+
+static BIN_ATTR_RO(symbol_map, 0);
+
+static void opal_export_symmap(void)
+{
+ const __be64 *syms;
+ unsigned int size;
+ struct device_node *fw;
+ int rc;
+
+ fw = of_find_node_by_path("/ibm,opal/firmware");
+ if (!fw)
+ return;
+ syms = of_get_property(fw, "symbol-map", &size);
+ if (!syms || size != 2 * sizeof(__be64))
+ return;
+
+ /* Setup attributes */
+ bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
+ bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+
+ rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+ if (rc)
+ pr_warn("Error %d creating OPAL symbols file\n", rc);
+}
+
static void __init opal_dump_region_init(void)
{
void *addr;
@@ -653,6 +687,14 @@ static void opal_ipmi_init(struct device_node *opal_node)
of_platform_device_create(np, NULL, NULL);
}
+static void opal_i2c_create_devs(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "ibm,opal-i2c")
+ of_platform_device_create(np, NULL, NULL);
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -679,6 +721,9 @@ static int __init opal_init(void)
of_node_put(consoles);
}
+ /* Create i2c platform devices */
+ opal_i2c_create_devs();
+
/* Find all OPAL interrupts and request them */
irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
pr_debug("opal: Found %d interrupts reserved for OPAL\n",
@@ -702,6 +747,8 @@ static int __init opal_init(void)
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
if (rc == 0) {
+ /* Export symbol map to userspace */
+ opal_export_symmap();
/* Setup dump region interface */
opal_dump_region_init();
/* Setup error log interface */
@@ -824,3 +871,4 @@ EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
+EXPORT_SYMBOL_GPL(opal_i2c_request);
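
opal_export_symmap() publishes the firmware's symbol table verbatim at /sys/firmware/opal/symbol_map. A minimal user-space consumer might look like this (error handling trimmed):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/opal/symbol_map", "rb");
	char buf[4096];
	size_t n;

	if (!f)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* raw firmware symbol table */
	fclose(f);
	return 0;
}
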
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 6c8e2d188cd0..604c48e7879a 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
}
#endif
+extern u32 pnv_get_supported_cpuidle_states(void);
+
extern void pnv_lpc_init(void);
bool cpu_core_split_required(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 30b1c3e298a6..b700a329c31d 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -36,8 +36,12 @@
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
+#include <asm/code-patching.h>
#include "powernv.h"
+#include "subcore.h"
static void __init pnv_setup_arch(void)
{
@@ -288,6 +292,168 @@ static void __init pnv_setup_machdep_rtas(void)
}
#endif /* CONFIG_PPC_POWERNV_RTAS */
+static u32 supported_cpuidle_states;
+
+int pnv_save_sprs_for_winkle(void)
+{
+ int cpu;
+ int rc;
+
+ /*
+ * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
+ * all cpus at boot. Read these register values on the current cpu and
+ * use the same values across all cpus.
+ */
+ uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t hid0_val = mfspr(SPRN_HID0);
+ uint64_t hid1_val = mfspr(SPRN_HID1);
+ uint64_t hid4_val = mfspr(SPRN_HID4);
+ uint64_t hid5_val = mfspr(SPRN_HID5);
+ uint64_t hmeer_val = mfspr(SPRN_HMEER);
+
+ for_each_possible_cpu(cpu) {
+ uint64_t pir = get_hard_smp_processor_id(cpu);
+ uint64_t hsprg0_val = (uint64_t)&paca[cpu];
+
+ /*
+ * HSPRG0 is used to store the cpu's pointer to the paca, so its last
+ * 3 bits are guaranteed to be 0. Program the slw to restore HSPRG0
+ * with the 63rd bit set, so that when a thread wakes up at 0x100 we
+ * can use this bit to distinguish between fastsleep and
+ * deep winkle.
+ */
+ hsprg0_val |= 1;
+
+ rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+ if (rc != 0)
+ return rc;
+
+ /* HIDs are per core registers */
+ if (cpu_thread_in_core(cpu) == 0) {
+
+ rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void pnv_alloc_idle_core_states(void)
+{
+ int i, j;
+ int nr_cores = cpu_nr_cores();
+ u32 *core_idle_state;
+
+ /*
+ * core_idle_state - The lower 8 bits track the idle state of each
+ * thread of the core; the bit above them is the lock bit. Initially
+ * all thread bits are set. They are cleared when the thread enters a
+ * deep idle state like sleep or winkle. Initially the lock bit is
+ * cleared.
+ * The lock bit has two purposes:
+ * a. While the first thread is restoring core state, it prevents
+ * other threads in the core from switching to process context.
+ * b. While the last thread in the core is saving the core state, it
+ * prevents a different thread from waking up.
+ */
+ for (i = 0; i < nr_cores; i++) {
+ int first_cpu = i * threads_per_core;
+ int node = cpu_to_node(first_cpu);
+
+ core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
+ *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+
+ for (j = 0; j < threads_per_core; j++) {
+ int cpu = first_cpu + j;
+
+ paca[cpu].core_idle_state_ptr = core_idle_state;
+ paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
+ paca[cpu].thread_mask = 1 << j;
+ }
+ }
+
+ update_subcore_sibling_mask();
+
+ if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
+ pnv_save_sprs_for_winkle();
+}
+
+u32 pnv_get_supported_cpuidle_states(void)
+{
+ return supported_cpuidle_states;
+}
+EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
+
+static int __init pnv_init_idle_states(void)
+{
+ struct device_node *power_mgt;
+ int dt_idle_states;
+ const __be32 *idle_state_flags;
+ u32 len_flags, flags;
+ int i;
+
+ supported_cpuidle_states = 0;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return 0;
+
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return 0;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("opal: PowerMgmt Node not found\n");
+ return 0;
+ }
+
+ idle_state_flags = of_get_property(power_mgt,
+ "ibm,cpu-idle-state-flags", &len_flags);
+ if (!idle_state_flags) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return 0;
+ }
+
+ dt_idle_states = len_flags / sizeof(u32);
+
+ for (i = 0; i < dt_idle_states; i++) {
+ flags = be32_to_cpu(idle_state_flags[i]);
+ supported_cpuidle_states |= flags;
+ }
+ if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_entry,
+ PPC_INST_NOP);
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_exit,
+ PPC_INST_NOP);
+ }
+ pnv_alloc_idle_core_states();
+ return 0;
+}
+
+subsys_initcall(pnv_init_idle_states);
+
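
pnv_init_idle_states() avoids a runtime flag by patching code at boot: when the device tree does not advertise OPAL_PM_SLEEP_ENABLED_ER1, the two workaround branches in the idle entry/exit paths are overwritten with NOPs, so unaffected platforms pay nothing in the hot path. Conceptually (a sketch; patch_instruction() is the real primitive and also handles the icache flush):

#include <stdint.h>

/* PPC_INST_NOP is "ori r0,r0,0". patch_site stands in for the
 * pnv_fastsleep_workaround_at_entry/_at_exit labels. */
#define INST_NOP 0x60000000u

static void patch_out_branch(uint32_t *patch_site)
{
	*patch_site = INST_NOP;
	/* the real patch_instruction() also flushes the icache for the site */
}
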
static int __init pnv_probe(void)
{
unsigned long root = of_get_flat_dt_root();
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index b716f666e48a..fc34025ef822 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -150,6 +150,7 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
unsigned long srr1;
+ u32 idle_states;
/* Standard hot unplug procedure */
local_irq_disable();
@@ -160,13 +161,23 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
+ idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
+
ppc64_runlatch_off();
- srr1 = power7_nap(1);
+
+ if (idle_states & OPAL_PM_WINKLE_ENABLED)
+ srr1 = power7_winkle();
+ else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
+ (idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
+ srr1 = power7_sleep();
+ else
+ srr1 = power7_nap(1);
+
ppc64_runlatch_on();
/*
@@ -198,13 +209,27 @@ static void pnv_smp_cpu_kill_self(void)
#endif /* CONFIG_HOTPLUG_CPU */
+static int pnv_cpu_bootable(unsigned int nr)
+{
+ /*
+ * Starting with POWER8, the subcore logic relies on all threads of a
+ * core being booted so that they can participate in split mode
+ * switches. So on those machines we ignore the smt_enabled_at_boot
+ * setting (smt-enabled on the kernel command line).
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return 1;
+
+ return smp_generic_cpu_bootable(nr);
+}
+
static struct smp_ops_t pnv_smp_ops = {
.message_pass = smp_muxed_ipi_message_pass,
.cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
.probe = xics_smp_probe,
.kick_cpu = pnv_smp_kick_cpu,
.setup_cpu = pnv_smp_setup_cpu,
- .cpu_bootable = smp_generic_cpu_bootable,
+ .cpu_bootable = pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = pnv_smp_cpu_disable,
.cpu_die = generic_cpu_die,
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index c87f96b79d1a..f60f80ada903 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -160,6 +160,18 @@ static void wait_for_sync_step(int step)
mb();
}
+static void update_hid_in_slw(u64 hid0)
+{
+ u64 idle_states = pnv_get_supported_cpuidle_states();
+
+ if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+ /* OPAL call to patch slw with the new HID0 value */
+ u64 cpu_pir = hard_smp_processor_id();
+
+ opal_slw_set_reg(cpu_pir, SPRN_HID0, hid0);
+ }
+}
+
static void unsplit_core(void)
{
u64 hid0, mask;
@@ -179,6 +191,7 @@ static void unsplit_core(void)
hid0 = mfspr(SPRN_HID0);
hid0 &= ~HID0_POWER8_DYNLPARDIS;
mtspr(SPRN_HID0, hid0);
+ update_hid_in_slw(hid0);
while (mfspr(SPRN_HID0) & mask)
cpu_relax();
@@ -215,6 +228,7 @@ static void split_core(int new_mode)
hid0 = mfspr(SPRN_HID0);
hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
mtspr(SPRN_HID0, hid0);
+ update_hid_in_slw(hid0);
/* Wait for it to happen */
while (!(mfspr(SPRN_HID0) & split_parms[i].mask))
@@ -251,6 +265,25 @@ bool cpu_core_split_required(void)
return true;
}
+void update_subcore_sibling_mask(void)
+{
+ int cpu;
+ /*
+ * sibling mask for the first cpu. Left shift this by required bits
+ * to get sibling mask for the rest of the cpus.
+ */
+ int sibling_mask_first_cpu = (1 << threads_per_subcore) - 1;
+
+ for_each_possible_cpu(cpu) {
+ int tid = cpu_thread_in_core(cpu);
+ int offset = (tid / threads_per_subcore) * threads_per_subcore;
+ int mask = sibling_mask_first_cpu << offset;
+
+ paca[cpu].subcore_sibling_mask = mask;
+ }
+}
+
static int cpu_update_split_mode(void *data)
{
int cpu, new_mode = *(int *)data;
@@ -284,6 +317,7 @@ static int cpu_update_split_mode(void *data)
/* Make the new mode public */
subcores_per_core = new_mode;
threads_per_subcore = threads_per_core / subcores_per_core;
+ update_subcore_sibling_mask();
/* Make sure the new mode is written before we exit */
mb();
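
To make update_subcore_sibling_mask() concrete, here is the computation as a standalone C function with a worked example, assuming 8 threads per core split into subcores of 2 threads:

#include <assert.h>

static int subcore_mask(int tid, int threads_per_subcore)
{
	int first = (1 << threads_per_subcore) - 1;
	int offset = (tid / threads_per_subcore) * threads_per_subcore;

	return first << offset;
}

int main(void)
{
	/* 8 threads/core split into 4 subcores of 2 threads each */
	assert(subcore_mask(5, 2) == 0x30);	/* threads 4 and 5 */
	assert(subcore_mask(0, 2) == 0x03);	/* threads 0 and 1 */
	return 0;
}
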
diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h
index 148abc91debf..84e02ae52895 100644
--- a/arch/powerpc/platforms/powernv/subcore.h
+++ b/arch/powerpc/platforms/powernv/subcore.h
@@ -14,5 +14,12 @@
#define SYNC_STEP_FINISHED 3 /* Set by secondary when split/unsplit is done */
#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
void split_core_secondary_loop(u8 *state);
-#endif
+extern void update_subcore_sibling_mask(void);
+#else
+static inline void update_subcore_sibling_mask(void) { }
+#endif /* CONFIG_SMP */
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d69f1cd87fd9..ba397bde7948 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -249,10 +249,6 @@ config HAVE_INTEL_TXT
def_bool y
depends on INTEL_IOMMU && ACPI
-config X86_INTEL_MPX
- def_bool y
- depends on CPU_SUP_INTEL
-
config X86_32_SMP
def_bool y
depends on X86_32 && SMP
@@ -887,11 +883,11 @@ config X86_UP_IOAPIC
config X86_LOCAL_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
+ select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
config X86_IO_APIC
- def_bool y
- depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
- select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+ def_bool X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+ depends on X86_LOCAL_APIC
select IRQ_DOMAIN
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -1594,6 +1590,32 @@ config X86_SMAP
If unsure, say Y.
+config X86_INTEL_MPX
+ prompt "Intel MPX (Memory Protection Extensions)"
+ def_bool n
+ depends on CPU_SUP_INTEL
+ ---help---
+ MPX provides hardware features that can be used in
+ conjunction with compiler-instrumented code to check
+ memory references. It is designed to detect buffer
+ overflow or underflow bugs.
+
+ This option enables running applications which are
+ instrumented or otherwise use MPX. It does not use MPX
+ itself inside the kernel or to protect the kernel
+ against bad memory references.
+
+ Enabling this option will make the kernel larger:
+ ~8k of kernel text and 36 bytes of data on a 64-bit
+ defconfig. It adds a long to the 'mm_struct' which
+ will increase the kernel memory overhead of each
+ process and adds some branches to paths used during
+ exec() and munmap().
+
+ For details, see Documentation/x86/intel_mpx.txt
+
+ If unsure, say N.
+
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 4615906d83df..9662290e0b20 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -94,30 +94,7 @@ extern void trace_call_function_single_interrupt(void);
#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
#endif /* CONFIG_TRACING */
-/* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
-extern unsigned long io_apic_irqs;
-
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-
-struct io_apic_irq_attr {
- int ioapic;
- int ioapic_pin;
- int trigger;
- int polarity;
-};
-
-static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
- int ioapic, int ioapic_pin,
- int trigger, int polarity)
-{
- irq_attr->ioapic = ioapic;
- irq_attr->ioapic_pin = ioapic_pin;
- irq_attr->trigger = trigger;
- irq_attr->polarity = polarity;
-}
-
+#ifdef CONFIG_IRQ_REMAP
/* Intel specific interrupt remapping information */
struct irq_2_iommu {
struct intel_iommu *iommu;
@@ -131,14 +108,12 @@ struct irq_2_irte {
u16 devid; /* Device ID for IRTE table */
u16 index; /* Index into IRTE table*/
};
+#endif /* CONFIG_IRQ_REMAP */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+struct irq_data;
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
struct irq_cfg {
- struct irq_pin_list *irq_2_pin;
cpumask_var_t domain;
cpumask_var_t old_domain;
u8 vector;
@@ -150,18 +125,39 @@ struct irq_cfg {
struct irq_2_irte irq_2_irte;
};
#endif
+ union {
+#ifdef CONFIG_X86_IO_APIC
+ struct {
+ struct list_head irq_2_pin;
+ };
+#endif
+ };
};
+extern struct irq_cfg *irq_cfg(unsigned int irq);
+extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
+extern void setup_vector_irq(int cpu);
+#ifdef CONFIG_SMP
extern void send_cleanup_vector(struct irq_cfg *);
+extern void irq_complete_move(struct irq_cfg *cfg);
+#else
+static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void irq_complete_move(struct irq_cfg *c) { }
+#endif
-struct irq_data;
-int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
- unsigned int *dest_id);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
-extern void setup_ioapic_dest(void);
-
-extern void enable_IO_APIC(void);
+extern int apic_retrigger_irq(struct irq_data *data);
+extern void apic_ack_edge(struct irq_data *data);
+extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id);
+#else /* CONFIG_X86_LOCAL_APIC */
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+#endif /* CONFIG_X86_LOCAL_APIC */
/* Statistics */
extern atomic_t irq_err_count;
@@ -185,7 +181,8 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
-extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR
+ - FIRST_EXTERNAL_VECTOR])(void);
#ifdef CONFIG_TRACING
#define trace_interrupt interrupt
#endif
@@ -195,17 +192,6 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void setup_vector_irq(int cpu);
-
-#ifdef CONFIG_X86_IO_APIC
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
-extern void __setup_vector_irq(int cpu);
-#else
-static inline void lock_vector_lock(void) {}
-static inline void unlock_vector_lock(void) {}
-static inline void __setup_vector_irq(int cpu) {}
-#endif
#endif /* !ASSEMBLY_ */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 1733ab49ac5e..bf006cce9418 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -132,6 +132,10 @@ extern int noioapicquirk;
/* -1 if "noapic" boot option passed */
extern int noioapicreroute;
+extern unsigned long io_apic_irqs;
+
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
+
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -139,18 +143,15 @@ extern int noioapicreroute;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-struct io_apic_irq_attr;
struct irq_cfg;
extern void ioapic_insert_resources(void);
+extern int arch_early_ioapic_init(void);
extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
unsigned int, int,
struct io_apic_irq_attr *);
extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
-extern void native_compose_msi_msg(struct pci_dev *pdev,
- unsigned int irq, unsigned int dest,
- struct msi_msg *msg, u8 hpet_id);
extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
extern int save_ioapic_entries(void);
@@ -160,6 +161,13 @@ extern int restore_ioapic_entries(void);
extern void setup_ioapic_ids_from_mpc(void);
extern void setup_ioapic_ids_from_mpc_nocheck(void);
+struct io_apic_irq_attr {
+ int ioapic;
+ int ioapic_pin;
+ int trigger;
+ int polarity;
+};
+
enum ioapic_domain_type {
IOAPIC_DOMAIN_INVALID,
IOAPIC_DOMAIN_LEGACY,
@@ -188,8 +196,10 @@ extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
extern u32 mp_pin_to_gsi(int ioapic, int pin);
extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
extern void mp_unmap_irq(int irq);
-extern void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
- struct ioapic_domain_cfg *cfg);
+extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+ struct ioapic_domain_cfg *cfg);
+extern int mp_unregister_ioapic(u32 gsi_base);
+extern int mp_ioapic_registered(u32 gsi_base);
extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq);
extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
@@ -227,19 +237,25 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
extern void io_apic_eoi(unsigned int apic, unsigned int vector);
-extern bool mp_should_keep_irq(struct device *dev);
-
+extern void setup_IO_APIC(void);
+extern void enable_IO_APIC(void);
+extern void disable_IO_APIC(void);
+extern void setup_ioapic_dest(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
+extern void print_IO_APICs(void);
#else /* !CONFIG_X86_IO_APIC */
+#define IO_APIC_IRQ(x) 0
#define io_apic_assign_pci_irqs 0
#define setup_ioapic_ids_from_mpc x86_init_noop
static inline void ioapic_insert_resources(void) { }
+static inline int arch_early_ioapic_init(void) { return 0; }
+static inline void print_IO_APICs(void) {}
#define gsi_top (NR_IRQS_LEGACY)
static inline int mp_find_ioapic(u32 gsi) { return 0; }
static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
static inline void mp_unmap_irq(int irq) { }
-static inline bool mp_should_keep_irq(struct device *dev) { return 1; }
static inline int save_ioapic_entries(void)
{
@@ -262,7 +278,6 @@ static inline void disable_ioapic_support(void) { }
#define native_io_apic_print_entries NULL
#define native_ioapic_set_affinity NULL
#define native_setup_ioapic_entry NULL
-#define native_compose_msi_msg NULL
#define native_eoi_ioapic_pin NULL
#endif
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 5702d7e3111d..666c89ec4bd7 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -126,6 +126,12 @@
#define NR_VECTORS 256
+#ifdef CONFIG_X86_LOCAL_APIC
+#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#else
+#define FIRST_SYSTEM_VECTOR NR_VECTORS
+#endif
+
#define FPU_IRQ 13
#define FIRST_VM86_IRQ 3
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 0892ea0e683f..4e370a5d8117 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -96,12 +96,15 @@ extern void pci_iommu_alloc(void);
#ifdef CONFIG_PCI_MSI
/* implemented in arch/x86/kernel/apic/io_apic. */
struct msi_desc;
+void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg, u8 hpet_id);
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void native_teardown_msi_irq(unsigned int irq);
void native_restore_msi_irqs(struct pci_dev *dev);
int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
unsigned int irq_base, unsigned int irq_offset);
#else
+#define native_compose_msi_msg NULL
#define native_setup_msi_irqs NULL
#define native_teardown_msi_irq NULL
#endif
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
index 46727eb37bfe..6e1aaf73852a 100644
--- a/arch/x86/include/uapi/asm/ldt.h
+++ b/arch/x86/include/uapi/asm/ldt.h
@@ -28,6 +28,13 @@ struct user_desc {
unsigned int seg_not_present:1;
unsigned int useable:1;
#ifdef __x86_64__
+ /*
+ * Because this bit is not present in 32-bit user code, user
+ * programs can pass uninitialized values here. Therefore, in
+ * any context in which a user_desc comes from a 32-bit program,
+ * the kernel must act as though lm == 0, regardless of the
+ * actual value.
+ */
unsigned int lm:1;
#endif
};
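
A sketch of the rule the new comment states, as a consumer of user_desc might enforce it; sanitize_user_desc() is a hypothetical helper, the real enforcement lives in the tls code:

#include <linux/types.h>
#include <asm/ldt.h>

static void sanitize_user_desc(struct user_desc *d, bool from_32bit_task)
{
#ifdef __x86_64__
	if (from_32bit_task)
		d->lm = 0;	/* may hold uninitialized garbage */
#endif
}
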
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a142e77693e1..4433a4be8171 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,19 @@ int acpi_fix_pin2_polarity __initdata;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif
+/*
+ * Locks related to IOAPIC hotplug
+ * Hotplug side:
+ * ->device_hotplug_lock
+ * ->acpi_ioapic_lock
+ * ->ioapic_lock
+ * Interrupt mapping side:
+ * ->acpi_ioapic_lock
+ * ->ioapic_mutex
+ * ->ioapic_lock
+ */
+static DEFINE_MUTEX(acpi_ioapic_lock);
+
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -395,10 +408,6 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return gsi;
- /* Don't set up the ACPI SCI because it's already set up */
- if (acpi_gbl_FADT.sci_interrupt == gsi)
- return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
-
trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
@@ -411,7 +420,8 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
if (irq < 0)
return irq;
- if (enable_update_mptable)
+ /* Don't set up the ACPI SCI because it's already set up */
+ if (enable_update_mptable && acpi_gbl_FADT.sci_interrupt != gsi)
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
return irq;
@@ -424,9 +434,6 @@ static void mp_unregister_gsi(u32 gsi)
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return;
- if (acpi_gbl_FADT.sci_interrupt == gsi)
- return;
-
irq = mp_map_gsi_to_irq(gsi, 0);
if (irq > 0)
mp_unmap_irq(irq);
@@ -609,8 +616,10 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
*irqp = gsi;
} else {
+ mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi,
IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+ mutex_unlock(&acpi_ioapic_lock);
if (irq < 0)
return -1;
*irqp = irq;
@@ -650,7 +659,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
int irq = gsi;
#ifdef CONFIG_X86_IO_APIC
+ mutex_lock(&acpi_ioapic_lock);
irq = mp_register_gsi(dev, gsi, trigger, polarity);
+ mutex_unlock(&acpi_ioapic_lock);
#endif
return irq;
@@ -659,7 +670,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
static void acpi_unregister_gsi_ioapic(u32 gsi)
{
#ifdef CONFIG_X86_IO_APIC
+ mutex_lock(&acpi_ioapic_lock);
mp_unregister_gsi(gsi);
+ mutex_unlock(&acpi_ioapic_lock);
#endif
}
@@ -690,6 +703,7 @@ void acpi_unregister_gsi(u32 gsi)
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+#ifdef CONFIG_X86_LOCAL_APIC
static void __init acpi_set_irq_model_ioapic(void)
{
acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
@@ -697,6 +711,7 @@ static void __init acpi_set_irq_model_ioapic(void)
__acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
acpi_ioapic = 1;
}
+#endif
/*
* ACPI based hotplug support for CPU
@@ -759,20 +774,74 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
- /* TBD */
- return -EINVAL;
-}
+ int ret = -ENOSYS;
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ int ioapic_id;
+ u64 addr;
+ struct ioapic_domain_cfg cfg = {
+ .type = IOAPIC_DOMAIN_DYNAMIC,
+ .ops = &acpi_irqdomain_ops,
+ };
+
+ ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
+ if (ioapic_id < 0) {
+ unsigned long long uid;
+ acpi_status status;
+ status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
+ NULL, &uid);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
+ return -EINVAL;
+ }
+ ioapic_id = (int)uid;
+ }
+
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
EXPORT_SYMBOL(acpi_register_ioapic);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
- /* TBD */
- return -EINVAL;
-}
+ int ret = -ENOSYS;
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_unregister_ioapic(gsi_base);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
EXPORT_SYMBOL(acpi_unregister_ioapic);
+/**
+ * acpi_ioapic_registered - Check whether the IOAPIC associated with @gsi_base
+ * has been registered
+ * @handle: ACPI handle of the IOAPIC device
+ * @gsi_base: GSI base associated with the IOAPIC
+ *
+ * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
+ * with acpi_register_ioapic()/acpi_unregister_ioapic().
+ */
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
+{
+ int ret = 0;
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ mutex_lock(&acpi_ioapic_lock);
+ ret = mp_ioapic_registered(gsi_base);
+ mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+ return ret;
+}
+
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_boot *sb;
@@ -1185,7 +1254,9 @@ static void __init acpi_process_madt(void)
/*
* Parse MADT IO-APIC entries
*/
+ mutex_lock(&acpi_ioapic_lock);
error = acpi_parse_madt_ioapic_entries();
+ mutex_unlock(&acpi_ioapic_lock);
if (!error) {
acpi_set_irq_model_ioapic();
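
The two lock orders documented at the top of this file are easiest to see side by side; both paths acquire strictly top-down, so they cannot deadlock against each other. A sketch with illustrative bodies:

static void ioapic_hotplug_side(int id, u32 addr, u32 gsi_base,
				struct ioapic_domain_cfg *cfg)
{
	lock_device_hotplug();			/* ->device_hotplug_lock */
	mutex_lock(&acpi_ioapic_lock);		/* ->acpi_ioapic_lock */
	mp_register_ioapic(id, addr, gsi_base, cfg); /* takes ->ioapic_lock */
	mutex_unlock(&acpi_ioapic_lock);
	unlock_device_hotplug();
}

static void irq_mapping_side(u32 gsi)
{
	mutex_lock(&acpi_ioapic_lock);		/* ->acpi_ioapic_lock */
	mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC); /* ioapic_mutex, ioapic_lock */
	mutex_unlock(&acpi_ioapic_lock);
}
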
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index dcb5b15401ce..8bb12ddc5db8 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,10 +2,12 @@
# Makefile for local APIC drivers and for the IO-APIC code
#
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o
obj-y += hw_nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_HT_IRQ) += htirq.o
obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ba6cc041edb1..29b5b18afa27 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -196,7 +196,7 @@ static int disable_apic_timer __initdata;
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
-int first_system_vector = 0xfe;
+int first_system_vector = FIRST_SYSTEM_VECTOR;
/*
* Debug level, exported for io_apic.c
@@ -1930,7 +1930,7 @@ int __init APIC_init_uniprocessor(void)
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
-static inline void __smp_spurious_interrupt(void)
+static inline void __smp_spurious_interrupt(u8 vector)
{
u32 v;
@@ -1939,30 +1939,32 @@ static inline void __smp_spurious_interrupt(void)
* if it is a vectored one. Just in case...
* Spurious interrupts should not be ACKed.
*/
- v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
- if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+ if (v & (1 << (vector & 0x1f)))
ack_APIC_irq();
inc_irq_stat(irq_spurious_count);
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
- pr_info("spurious APIC interrupt on CPU#%d, "
- "should never happen.\n", smp_processor_id());
+ pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+ "should never happen.\n", vector, smp_processor_id());
}
__visible void smp_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
- __smp_spurious_interrupt();
+ __smp_spurious_interrupt(~regs->orig_ax);
exiting_irq();
}
__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
{
+ u8 vector = ~regs->orig_ax;
+
entering_irq();
- trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
- __smp_spurious_interrupt();
- trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
+ trace_spurious_apic_entry(vector);
+ __smp_spurious_interrupt(vector);
+ trace_spurious_apic_exit(vector);
exiting_irq();
}
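
The switch from hardcoding SPURIOUS_APIC_VECTOR to ~regs->orig_ax works because the interrupt stubs push the one's complement of the vector number into orig_ax; complementing it again and truncating to 8 bits recovers the vector. A standalone illustration:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t orig_ax = ~(uint64_t)0xff;	/* stub pushed ~vector */
	uint8_t vector = ~orig_ax;		/* 0xff: SPURIOUS_APIC_VECTOR */

	assert(vector == 0xff);
	return 0;
}
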
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
new file mode 100644
index 000000000000..816f36e979ad
--- /dev/null
+++ b/arch/x86/kernel/apic/htirq.c
@@ -0,0 +1,107 @@
+/*
+ * Support Hypertransport IRQ
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/htirq.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/hypertransport.h>
+
+/*
+ * Hypertransport interrupt support
+ */
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+ struct ht_irq_msg msg;
+
+ fetch_ht_irq_msg(irq, &msg);
+
+ msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+ msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
+
+ write_ht_irq_msg(irq, &msg);
+}
+
+static int
+ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ target_ht_irq(data->irq, dest, cfg->vector);
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip ht_irq_chip = {
+ .name = "PCI-HT",
+ .irq_mask = mask_ht_irq,
+ .irq_unmask = unmask_ht_irq,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = ht_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+ struct irq_cfg *cfg;
+ struct ht_irq_msg msg;
+ unsigned dest;
+ int err;
+
+ if (disable_apic)
+ return -ENXIO;
+
+ cfg = irq_cfg(irq);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
+
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
+ HT_IRQ_LOW_DEST_ID(dest) |
+ HT_IRQ_LOW_VECTOR(cfg->vector) |
+ ((apic->irq_dest_mode == 0) ?
+ HT_IRQ_LOW_DM_PHYSICAL :
+ HT_IRQ_LOW_DM_LOGICAL) |
+ HT_IRQ_LOW_RQEOI_EDGE |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ HT_IRQ_LOW_MT_FIXED :
+ HT_IRQ_LOW_MT_ARBITRATED) |
+ HT_IRQ_LOW_IRQ_MASKED;
+
+ write_ht_irq_msg(irq, &msg);
+
+ irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
+
+ dev_dbg(&dev->dev, "irq %d for HT\n", irq);
+
+ return 0;
+}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a6745e756729..3f5f60406ab1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -32,15 +32,11 @@
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
#include <linux/bootmem.h>
-#include <linux/dmar.h>
-#include <linux/hpet.h>
#include <asm/idle.h>
#include <asm/io.h>
@@ -52,17 +48,12 @@
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
-#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
-#define __apicdebuginit(type) static type __init
-
#define for_each_ioapic(idx) \
for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define for_each_ioapic_reverse(idx) \
@@ -74,7 +65,7 @@
for_each_pin((idx), (pin))
#define for_each_irq_pin(entry, head) \
- for (entry = head; entry; entry = entry->next)
+ list_for_each_entry(entry, &head, list)
/*
* Is the SiS APIC rmw bug present ?
@@ -83,7 +74,6 @@
int sis_apic_bug = -1;
static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
static DEFINE_MUTEX(ioapic_mutex);
static unsigned int ioapic_dynirq_base;
static int ioapic_initialized;
@@ -112,6 +102,7 @@ static struct ioapic {
struct ioapic_domain_cfg irqdomain_cfg;
struct irq_domain *irqdomain;
struct mp_pin_info *pin_info;
+ struct resource *iomem_res;
} ioapics[MAX_IO_APICS];
#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
@@ -205,8 +196,6 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
@@ -228,8 +217,8 @@ void mp_save_irq(struct mpc_intsrc *m)
}
struct irq_pin_list {
+ struct list_head list;
int apic, pin;
- struct irq_pin_list *next;
};
static struct irq_pin_list *alloc_irq_pin_list(int node)
@@ -237,7 +226,26 @@ static struct irq_pin_list *alloc_irq_pin_list(int node)
return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
-int __init arch_early_irq_init(void)
+static void alloc_ioapic_saved_registers(int idx)
+{
+ size_t size;
+
+ if (ioapics[idx].saved_registers)
+ return;
+
+ size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
+ ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
+ if (!ioapics[idx].saved_registers)
+ pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
+}
+
+static void free_ioapic_saved_registers(int idx)
+{
+ kfree(ioapics[idx].saved_registers);
+ ioapics[idx].saved_registers = NULL;
+}
+
+int __init arch_early_ioapic_init(void)
{
struct irq_cfg *cfg;
int i, node = cpu_to_node(0);
@@ -245,13 +253,8 @@ int __init arch_early_irq_init(void)
if (!nr_legacy_irqs())
io_apic_irqs = ~0UL;
- for_each_ioapic(i) {
- ioapics[i].saved_registers =
- kzalloc(sizeof(struct IO_APIC_route_entry) *
- ioapics[i].nr_registers, GFP_KERNEL);
- if (!ioapics[i].saved_registers)
- pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
- }
+ for_each_ioapic(i)
+ alloc_ioapic_saved_registers(i);
/*
* For legacy IRQ's, start with assigning irq0 to irq15 to
@@ -266,61 +269,6 @@ int __init arch_early_irq_init(void)
return 0;
}
-static inline struct irq_cfg *irq_cfg(unsigned int irq)
-{
- return irq_get_chip_data(irq);
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
- struct irq_cfg *cfg;
-
- cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
- if (!cfg)
- return NULL;
- if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
- goto out_cfg;
- if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
- goto out_domain;
- return cfg;
-out_domain:
- free_cpumask_var(cfg->domain);
-out_cfg:
- kfree(cfg);
- return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
- if (!cfg)
- return;
- irq_set_chip_data(at, NULL);
- free_cpumask_var(cfg->domain);
- free_cpumask_var(cfg->old_domain);
- kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
- int res = irq_alloc_desc_at(at, node);
- struct irq_cfg *cfg;
-
- if (res < 0) {
- if (res != -EEXIST)
- return NULL;
- cfg = irq_cfg(at);
- if (cfg)
- return cfg;
- }
-
- cfg = alloc_irq_cfg(at, node);
- if (cfg)
- irq_set_chip_data(at, cfg);
- else
- irq_free_desc(at);
- return cfg;
-}
-
struct io_apic {
unsigned int index;
unsigned int unused[3];
@@ -445,15 +393,12 @@ static void ioapic_mask_entry(int apic, int pin)
*/
static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
- struct irq_pin_list **last, *entry;
+ struct irq_pin_list *entry;
/* don't allow duplicates */
- last = &cfg->irq_2_pin;
- for_each_irq_pin(entry, cfg->irq_2_pin) {
+ for_each_irq_pin(entry, cfg->irq_2_pin)
if (entry->apic == apic && entry->pin == pin)
return 0;
- last = &entry->next;
- }
entry = alloc_irq_pin_list(node);
if (!entry) {
@@ -464,22 +409,19 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
entry->apic = apic;
entry->pin = pin;
- *last = entry;
+ list_add_tail(&entry->list, &cfg->irq_2_pin);
return 0;
}
static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
{
- struct irq_pin_list **last, *entry;
+ struct irq_pin_list *tmp, *entry;
- last = &cfg->irq_2_pin;
- for_each_irq_pin(entry, cfg->irq_2_pin)
+ list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list)
if (entry->apic == apic && entry->pin == pin) {
- *last = entry->next;
+ list_del(&entry->list);
kfree(entry);
return;
- } else {
- last = &entry->next;
}
}
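
The irq_2_pin conversion above replaces a hand-rolled singly linked list with <linux/list.h> primitives; traversal becomes list_for_each_entry() and removal no longer needs to track a "last" pointer. The shape of the change, with an illustrative struct:

#include <linux/list.h>
#include <linux/slab.h>

struct pin {
	struct list_head list;
	int apic, pin;
};

static void del_pin(struct list_head *head, int apic, int pin)
{
	struct pin *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		if (entry->apic == apic && entry->pin == pin) {
			list_del(&entry->list);
			kfree(entry);
			return;
		}
	}
}
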
@@ -559,7 +501,7 @@ static void mask_ioapic(struct irq_cfg *cfg)
static void mask_ioapic_irq(struct irq_data *data)
{
- mask_ioapic(data->chip_data);
+ mask_ioapic(irqd_cfg(data));
}
static void __unmask_ioapic(struct irq_cfg *cfg)
@@ -578,7 +520,7 @@ static void unmask_ioapic(struct irq_cfg *cfg)
static void unmask_ioapic_irq(struct irq_data *data)
{
- unmask_ioapic(data->chip_data);
+ unmask_ioapic(irqd_cfg(data));
}
/*
@@ -1164,8 +1106,7 @@ void mp_unmap_irq(int irq)
* Find a specific PCI IRQ entry.
* Not an __init, possibly needed by modules
*/
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
- struct io_apic_irq_attr *irq_attr)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
int irq, i, best_ioapic = -1, best_idx = -1;
@@ -1219,195 +1160,11 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
return -1;
out:
- irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
- IOAPIC_MAP_ALLOC);
- if (irq > 0)
- set_io_apic_irq_attr(irq_attr, best_ioapic,
- mp_irqs[best_idx].dstirq,
- irq_trigger(best_idx),
- irq_polarity(best_idx));
- return irq;
+ return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
+ IOAPIC_MAP_ALLOC);
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-void lock_vector_lock(void)
-{
- /* Used to the online set of cpus does not change
- * during assign_irq_vector.
- */
- raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
- raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
- /*
- * NOTE! The local APIC isn't very good at handling
- * multiple interrupts at the same interrupt level.
- * As the interrupt level is determined by taking the
- * vector number and shifting that right by 4, we
- * want to spread these out a bit so that they don't
- * all fall in the same interrupt level.
- *
- * Also, we've got to be careful not to trash gate
- * 0x80, because int 0x80 is hm, kind of importantish. ;)
- */
- static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
- static int current_offset = VECTOR_OFFSET_START % 16;
- int cpu, err;
- cpumask_var_t tmp_mask;
-
- if (cfg->move_in_progress)
- return -EBUSY;
-
- if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
- return -ENOMEM;
-
- /* Only try and allocate irqs on cpus that are present */
- err = -ENOSPC;
- cpumask_clear(cfg->old_domain);
- cpu = cpumask_first_and(mask, cpu_online_mask);
- while (cpu < nr_cpu_ids) {
- int new_cpu, vector, offset;
-
- apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
- if (cpumask_subset(tmp_mask, cfg->domain)) {
- err = 0;
- if (cpumask_equal(tmp_mask, cfg->domain))
- break;
- /*
- * New cpumask using the vector is a proper subset of
- * the current in use mask. So cleanup the vector
- * allocation for the members that are not used anymore.
- */
- cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
- cfg->move_in_progress =
- cpumask_intersects(cfg->old_domain, cpu_online_mask);
- cpumask_and(cfg->domain, cfg->domain, tmp_mask);
- break;
- }
-
- vector = current_vector;
- offset = current_offset;
-next:
- vector += 16;
- if (vector >= first_system_vector) {
- offset = (offset + 1) % 16;
- vector = FIRST_EXTERNAL_VECTOR + offset;
- }
-
- if (unlikely(current_vector == vector)) {
- cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
- cpumask_andnot(tmp_mask, mask, cfg->old_domain);
- cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
- continue;
- }
-
- if (test_bit(vector, used_vectors))
- goto next;
-
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
- if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
- goto next;
- }
- /* Found one! */
- current_vector = vector;
- current_offset = offset;
- if (cfg->vector) {
- cpumask_copy(cfg->old_domain, cfg->domain);
- cfg->move_in_progress =
- cpumask_intersects(cfg->old_domain, cpu_online_mask);
- }
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- per_cpu(vector_irq, new_cpu)[vector] = irq;
- cfg->vector = vector;
- cpumask_copy(cfg->domain, tmp_mask);
- err = 0;
- break;
- }
- free_cpumask_var(tmp_mask);
- return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
- int err;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- err = __assign_irq_vector(irq, cfg, mask);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
- return err;
-}
-
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
- int cpu, vector;
-
- BUG_ON(!cfg->vector);
-
- vector = cfg->vector;
- for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
- cfg->vector = 0;
- cpumask_clear(cfg->domain);
-
- if (likely(!cfg->move_in_progress))
- return;
- for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- if (per_cpu(vector_irq, cpu)[vector] != irq)
- continue;
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
- break;
- }
- }
- cfg->move_in_progress = 0;
-}
-
-void __setup_vector_irq(int cpu)
-{
- /* Initialize vector_irq on a new cpu */
- int irq, vector;
- struct irq_cfg *cfg;
-
- /*
- * vector_lock will make sure that we don't run into irq vector
- * assignments that might be happening on another cpu in parallel,
- * while we setup our initial vector to irq mappings.
- */
- raw_spin_lock(&vector_lock);
- /* Mark the inuse vectors */
- for_each_active_irq(irq) {
- cfg = irq_cfg(irq);
- if (!cfg)
- continue;
-
- if (!cpumask_test_cpu(cpu, cfg->domain))
- continue;
- vector = cfg->vector;
- per_cpu(vector_irq, cpu)[vector] = irq;
- }
- /* Mark the free vectors */
- for (vector = 0; vector < NR_VECTORS; ++vector) {
- irq = per_cpu(vector_irq, cpu)[vector];
- if (irq <= VECTOR_UNDEFINED)
- continue;
-
- cfg = irq_cfg(irq);
- if (!cpumask_test_cpu(cpu, cfg->domain))
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
- }
- raw_spin_unlock(&vector_lock);
-}
-
static struct irq_chip ioapic_chip;
#ifdef CONFIG_X86_32
@@ -1496,7 +1253,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
&dest)) {
pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
- __clear_irq_vector(irq, cfg);
+ clear_irq_vector(irq, cfg);
return;
}
@@ -1510,7 +1267,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
- __clear_irq_vector(irq, cfg);
+ clear_irq_vector(irq, cfg);
return;
}
@@ -1641,7 +1398,7 @@ void ioapic_zap_locks(void)
raw_spin_lock_init(&ioapic_lock);
}
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
{
union IO_APIC_reg_00 reg_00;
union IO_APIC_reg_01 reg_01;
@@ -1698,7 +1455,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
}
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
{
int ioapic_idx;
struct irq_cfg *cfg;
@@ -1731,8 +1488,7 @@ __apicdebuginit(void) print_IO_APICs(void)
cfg = irq_cfg(irq);
if (!cfg)
continue;
- entry = cfg->irq_2_pin;
- if (!entry)
+ if (list_empty(&cfg->irq_2_pin))
continue;
printk(KERN_DEBUG "IRQ%d ", irq);
for_each_irq_pin(entry, cfg->irq_2_pin)
@@ -1743,205 +1499,6 @@ __apicdebuginit(void) print_IO_APICs(void)
printk(KERN_INFO ".................................... done.\n");
}
-__apicdebuginit(void) print_APIC_field(int base)
-{
- int i;
-
- printk(KERN_DEBUG);
-
- for (i = 0; i < 8; i++)
- pr_cont("%08x", apic_read(base + i*0x10));
-
- pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
- unsigned int i, v, ver, maxlvt;
- u64 icr;
-
- printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
- smp_processor_id(), hard_smp_processor_id());
- v = apic_read(APIC_ID);
- printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
- v = apic_read(APIC_LVR);
- printk(KERN_INFO "... APIC VERSION: %08x\n", v);
- ver = GET_APIC_VERSION(v);
- maxlvt = lapic_get_maxlvt();
-
- v = apic_read(APIC_TASKPRI);
- printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
- if (!APIC_XAPIC(ver)) {
- v = apic_read(APIC_ARBPRI);
- printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
- v & APIC_ARBPRI_MASK);
- }
- v = apic_read(APIC_PROCPRI);
- printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
- }
-
- /*
- * Remote read supported only in the 82489DX and local APIC for
- * Pentium processors.
- */
- if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
- v = apic_read(APIC_RRR);
- printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
- }
-
- v = apic_read(APIC_LDR);
- printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
- if (!x2apic_enabled()) {
- v = apic_read(APIC_DFR);
- printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
- }
- v = apic_read(APIC_SPIV);
- printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
- printk(KERN_DEBUG "... APIC ISR field:\n");
- print_APIC_field(APIC_ISR);
- printk(KERN_DEBUG "... APIC TMR field:\n");
- print_APIC_field(APIC_TMR);
- printk(KERN_DEBUG "... APIC IRR field:\n");
- print_APIC_field(APIC_IRR);
-
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
- if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
- apic_write(APIC_ESR, 0);
-
- v = apic_read(APIC_ESR);
- printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
- }
-
- icr = apic_icr_read();
- printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
- printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
- v = apic_read(APIC_LVTT);
- printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
- if (maxlvt > 3) { /* PC is LVT#4. */
- v = apic_read(APIC_LVTPC);
- printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
- }
- v = apic_read(APIC_LVT0);
- printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
- v = apic_read(APIC_LVT1);
- printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
- if (maxlvt > 2) { /* ERR is LVT#3. */
- v = apic_read(APIC_LVTERR);
- printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
- }
-
- v = apic_read(APIC_TMICT);
- printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
- v = apic_read(APIC_TMCCT);
- printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
- v = apic_read(APIC_TDCR);
- printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
- if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
- v = apic_read(APIC_EFEAT);
- maxlvt = (v >> 16) & 0xff;
- printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
- v = apic_read(APIC_ECTRL);
- printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
- for (i = 0; i < maxlvt; i++) {
- v = apic_read(APIC_EILVTn(i));
- printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
- }
- }
- pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
- int cpu;
-
- if (!maxcpu)
- return;
-
- preempt_disable();
- for_each_online_cpu(cpu) {
- if (cpu >= maxcpu)
- break;
- smp_call_function_single(cpu, print_local_APIC, NULL, 1);
- }
- preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
- unsigned int v;
- unsigned long flags;
-
- if (!nr_legacy_irqs())
- return;
-
- printk(KERN_DEBUG "\nprinting PIC contents\n");
-
- raw_spin_lock_irqsave(&i8259A_lock, flags);
-
- v = inb(0xa1) << 8 | inb(0x21);
- printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-
- v = inb(0xa0) << 8 | inb(0x20);
- printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-
- outb(0x0b,0xa0);
- outb(0x0b,0x20);
- v = inb(0xa0) << 8 | inb(0x20);
- outb(0x0a,0xa0);
- outb(0x0a,0x20);
-
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-
- v = inb(0x4d1) << 8 | inb(0x4d0);
- printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
- int num = -1;
-
- if (strcmp(arg, "all") == 0) {
- show_lapic = CONFIG_NR_CPUS;
- } else {
- get_option(&arg, &num);
- if (num >= 0)
- show_lapic = num;
- }
-
- return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
- if (apic_verbosity == APIC_QUIET)
- return 0;
-
- print_PIC();
-
- /* don't print out if apic is not there */
- if (!cpu_has_apic && !apic_from_smp_config())
- return 0;
-
- print_local_APICs(show_lapic);
- print_IO_APICs();
-
- return 0;
-}
-
-late_initcall(print_ICs);
-
-
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -2244,26 +1801,12 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
if (legacy_pic->irq_pending(irq))
was_pending = 1;
}
- __unmask_ioapic(data->chip_data);
+ __unmask_ioapic(irqd_cfg(data));
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
-static int ioapic_retrigger_irq(struct irq_data *data)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned long flags;
- int cpu;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
- apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
-
- return 1;
-}
-
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2273,113 +1816,6 @@ static int ioapic_retrigger_irq(struct irq_data *data)
* races.
*/
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
- cpumask_var_t cleanup_mask;
-
- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
- unsigned int i;
- for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
- apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
- } else {
- cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
- free_cpumask_var(cleanup_mask);
- }
- cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
- unsigned vector, me;
-
- ack_APIC_irq();
- irq_enter();
- exit_idle();
-
- me = smp_processor_id();
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- int irq;
- unsigned int irr;
- struct irq_desc *desc;
- struct irq_cfg *cfg;
- irq = __this_cpu_read(vector_irq[vector]);
-
- if (irq <= VECTOR_UNDEFINED)
- continue;
-
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
-
- cfg = irq_cfg(irq);
- if (!cfg)
- continue;
-
- raw_spin_lock(&desc->lock);
-
- /*
- * Check if the irq migration is in progress. If so, we
- * haven't received the cleanup request yet for this irq.
- */
- if (cfg->move_in_progress)
- goto unlock;
-
- if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
- goto unlock;
-
- irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
- /*
- * Check if the vector that needs to be cleanedup is
- * registered at the cpu's IRR. If so, then this is not
- * the best time to clean it up. Lets clean it up in the
- * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
- * to myself.
- */
- if (irr & (1 << (vector % 32))) {
- apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
- goto unlock;
- }
- __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
- raw_spin_unlock(&desc->lock);
- }
-
- irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
- unsigned me;
-
- if (likely(!cfg->move_in_progress))
- return;
-
- me = smp_processor_id();
-
- if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
- send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
-
- if (!cfg)
- return;
-
- __irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
@@ -2400,41 +1836,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
}
}
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- unsigned int *dest_id)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int irq = data->irq;
- int err;
-
- if (!config_enabled(CONFIG_SMP))
- return -EPERM;
-
- if (!cpumask_intersects(mask, cpu_online_mask))
- return -EINVAL;
-
- err = assign_irq_vector(irq, cfg, mask);
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
- if (err) {
- if (assign_irq_vector(irq, cfg, data->affinity))
- pr_err("Failed to recover vector for irq %d\n", irq);
- return err;
- }
-
- cpumask_copy(data->affinity, mask);
-
- return 0;
-}
-
-
int native_ioapic_set_affinity(struct irq_data *data,
const struct cpumask *mask,
bool force)
@@ -2447,24 +1848,17 @@ int native_ioapic_set_affinity(struct irq_data *data,
return -EPERM;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- ret = __ioapic_set_affinity(data, mask, &dest);
+ ret = apic_set_affinity(data, mask, &dest);
if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
- __target_IO_APIC_irq(irq, dest, data->chip_data);
+ __target_IO_APIC_irq(irq, dest, irqd_cfg(data));
ret = IRQ_SET_MASK_OK_NOCOPY;
}
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
-static void ack_apic_edge(struct irq_data *data)
-{
- irq_complete_move(data->chip_data);
- irq_move_irq(data);
- ack_APIC_irq();
-}
-
atomic_t irq_mis_count;
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -2547,9 +1941,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data,
}
#endif
-static void ack_apic_level(struct irq_data *data)
+static void ack_ioapic_level(struct irq_data *data)
{
- struct irq_cfg *cfg = data->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
int i, irq = data->irq;
unsigned long v;
bool masked;
@@ -2619,10 +2013,10 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_startup = startup_ioapic_irq,
.irq_mask = mask_ioapic_irq,
.irq_unmask = unmask_ioapic_irq,
- .irq_ack = ack_apic_edge,
- .irq_eoi = ack_apic_level,
+ .irq_ack = apic_ack_edge,
+ .irq_eoi = ack_ioapic_level,
.irq_set_affinity = native_ioapic_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
+ .irq_retrigger = apic_retrigger_irq,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -2965,6 +2359,16 @@ static int mp_irqdomain_create(int ioapic)
return 0;
}
+static void ioapic_destroy_irqdomain(int idx)
+{
+ if (ioapics[idx].irqdomain) {
+ irq_domain_remove(ioapics[idx].irqdomain);
+ ioapics[idx].irqdomain = NULL;
+ }
+ kfree(ioapics[idx].pin_info);
+ ioapics[idx].pin_info = NULL;
+}
+
void __init setup_IO_APIC(void)
{
int ioapic;
@@ -3044,399 +2448,6 @@ static int __init ioapic_init_ops(void)
device_initcall(ioapic_init_ops);
-/*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
- struct irq_cfg *cfg;
- unsigned long flags;
- int ret;
-
- cfg = alloc_irq_cfg(irq, node);
- if (!cfg)
- return -ENOMEM;
-
- raw_spin_lock_irqsave(&vector_lock, flags);
- ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
- raw_spin_unlock_irqrestore(&vector_lock, flags);
-
- if (!ret)
- irq_set_chip_data(irq, cfg);
- else
- free_irq_cfg(irq, cfg);
- return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
- unsigned long flags;
-
- free_remapped_irq(irq);
- raw_spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq, cfg);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
- free_irq_cfg(irq, cfg);
-}
-
-/*
- * MSI message composition
- */
-void native_compose_msi_msg(struct pci_dev *pdev,
- unsigned int irq, unsigned int dest,
- struct msi_msg *msg, u8 hpet_id)
-{
- struct irq_cfg *cfg = irq_cfg(irq);
-
- msg->address_hi = MSI_ADDR_BASE_HI;
-
- if (x2apic_enabled())
- msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
-
- msg->address_lo =
- MSI_ADDR_BASE_LO |
- ((apic->irq_dest_mode == 0) ?
- MSI_ADDR_DEST_MODE_PHYSICAL:
- MSI_ADDR_DEST_MODE_LOGICAL) |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- MSI_ADDR_REDIRECTION_CPU:
- MSI_ADDR_REDIRECTION_LOWPRI) |
- MSI_ADDR_DEST_ID(dest);
-
- msg->data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- MSI_DATA_DELIVERY_FIXED:
- MSI_DATA_DELIVERY_LOWPRI) |
- MSI_DATA_VECTOR(cfg->vector);
-}
-
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
- struct msi_msg *msg, u8 hpet_id)
-{
- struct irq_cfg *cfg;
- int err;
- unsigned dest;
-
- if (disable_apic)
- return -ENXIO;
-
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus(), &dest);
- if (err)
- return err;
-
- x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-
- return 0;
-}
-
-static int
-msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- struct msi_msg msg;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- __get_cached_msi_msg(data->msi_desc, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
- __pci_write_msi_msg(data->msi_desc, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
- .name = "PCI-MSI",
- .irq_unmask = pci_msi_unmask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- unsigned int irq_base, unsigned int irq_offset)
-{
- struct irq_chip *chip = &msi_chip;
- struct msi_msg msg;
- unsigned int irq = irq_base + irq_offset;
- int ret;
-
- ret = msi_compose_msg(dev, irq, &msg, -1);
- if (ret < 0)
- return ret;
-
- irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
-
- /*
- * MSI-X message is written per-IRQ, the offset is always 0.
- * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
- */
- if (!irq_offset)
- pci_write_msi_msg(irq, &msg);
-
- setup_remapped_irq(irq, irq_cfg(irq), chip);
-
- irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-
- dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
-
- return 0;
-}
-
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_desc *msidesc;
- unsigned int irq;
- int node, ret;
-
- /* Multiple MSI vectors only supported with interrupt remapping */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
- node = dev_to_node(&dev->dev);
-
- list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = irq_alloc_hwirq(node);
- if (!irq)
- return -ENOSPC;
-
- ret = setup_msi_irq(dev, msidesc, irq, 0);
- if (ret < 0) {
- irq_free_hwirq(irq);
- return ret;
- }
-
- }
- return 0;
-}
-
-void native_teardown_msi_irq(unsigned int irq)
-{
- irq_free_hwirq(irq);
-}
-
-#ifdef CONFIG_DMAR_TABLE
-static int
-dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest, irq = data->irq;
- struct msi_msg msg;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- dmar_msi_read(irq, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
-
- dmar_msi_write(irq, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip dmar_msi_type = {
- .name = "DMAR_MSI",
- .irq_unmask = dmar_msi_unmask,
- .irq_mask = dmar_msi_mask,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = dmar_msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
-{
- int ret;
- struct msi_msg msg;
-
- ret = msi_compose_msg(NULL, irq, &msg, -1);
- if (ret < 0)
- return ret;
- dmar_msi_write(irq, &msg);
- irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
- "edge");
- return 0;
-}
-#endif
-
-#ifdef CONFIG_HPET_TIMER
-
-static int hpet_msi_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- struct msi_msg msg;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- hpet_msi_read(data->handler_data, &msg);
-
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
- msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
- hpet_msi_write(data->handler_data, &msg);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip hpet_msi_type = {
- .name = "HPET_MSI",
- .irq_unmask = hpet_msi_unmask,
- .irq_mask = hpet_msi_mask,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = hpet_msi_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
- struct irq_chip *chip = &hpet_msi_type;
- struct msi_msg msg;
- int ret;
-
- ret = msi_compose_msg(NULL, irq, &msg, id);
- if (ret < 0)
- return ret;
-
- hpet_msi_write(irq_get_handler_data(irq), &msg);
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- setup_remapped_irq(irq, irq_cfg(irq), chip);
-
- irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
- return 0;
-}
-#endif
-
-#endif /* CONFIG_PCI_MSI */
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
- struct ht_irq_msg msg;
- fetch_ht_irq_msg(irq, &msg);
-
- msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
- msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
- msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
- msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
- write_ht_irq_msg(irq, &msg);
-}
-
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest;
- int ret;
-
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (ret)
- return ret;
-
- target_ht_irq(data->irq, dest, cfg->vector);
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip ht_irq_chip = {
- .name = "PCI-HT",
- .irq_mask = mask_ht_irq,
- .irq_unmask = unmask_ht_irq,
- .irq_ack = ack_apic_edge,
- .irq_set_affinity = ht_set_affinity,
- .irq_retrigger = ioapic_retrigger_irq,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
- struct irq_cfg *cfg;
- struct ht_irq_msg msg;
- unsigned dest;
- int err;
-
- if (disable_apic)
- return -ENXIO;
-
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (err)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus(), &dest);
- if (err)
- return err;
-
- msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
- msg.address_lo =
- HT_IRQ_LOW_BASE |
- HT_IRQ_LOW_DEST_ID(dest) |
- HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((apic->irq_dest_mode == 0) ?
- HT_IRQ_LOW_DM_PHYSICAL :
- HT_IRQ_LOW_DM_LOGICAL) |
- HT_IRQ_LOW_RQEOI_EDGE |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED) |
- HT_IRQ_LOW_IRQ_MASKED;
-
- write_ht_irq_msg(irq, &msg);
-
- irq_set_chip_and_handler_name(irq, &ht_irq_chip,
- handle_edge_irq, "edge");
-
- dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-
- return 0;
-}
-#endif /* CONFIG_HT_IRQ */
-
static int
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
{
@@ -3451,7 +2462,7 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
return ret;
}
-static int __init io_apic_get_redir_entries(int ioapic)
+static int io_apic_get_redir_entries(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3476,28 +2487,8 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
}
-int __init arch_probe_nr_irqs(void)
-{
- int nr;
-
- if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
- nr_irqs = NR_VECTORS * nr_cpu_ids;
-
- nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
- /*
- * for MSI and HT dyn irq
- */
- nr += gsi_top * 16;
-#endif
- if (nr < nr_irqs)
- nr_irqs = nr;
-
- return 0;
-}
-
#ifdef CONFIG_X86_32
-static int __init io_apic_get_unique_id(int ioapic, int apic_id)
+static int io_apic_get_unique_id(int ioapic, int apic_id)
{
union IO_APIC_reg_00 reg_00;
static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3572,30 +2563,63 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
return apic_id;
}
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
{
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
!APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
- return io_apic_get_unique_id(nr_ioapics, id);
+ return io_apic_get_unique_id(idx, id);
else
return id;
}
#else
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
{
- int i;
+ union IO_APIC_reg_00 reg_00;
DECLARE_BITMAP(used, 256);
+ unsigned long flags;
+ u8 new_id;
+ int i;
bitmap_zero(used, 256);
for_each_ioapic(i)
__set_bit(mpc_ioapic_id(i), used);
+
+ /* Hand out the requested id if available */
if (!test_bit(id, used))
return id;
- return find_first_zero_bit(used, 256);
+
+ /*
+ * Read the current id from the ioapic and keep it if
+ * available.
+ */
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(idx, 0);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ new_id = reg_00.bits.ID;
+ if (!test_bit(new_id, used)) {
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
+ idx, new_id, id);
+ return new_id;
+ }
+
+ /*
+ * Get the next free id and write it to the ioapic.
+ */
+ new_id = find_first_zero_bit(used, 256);
+ reg_00.bits.ID = new_id;
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(idx, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(idx, 0);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ /* Sanity check */
+ BUG_ON(reg_00.bits.ID != new_id);
+
+ return new_id;
}
#endif
-static int __init io_apic_get_version(int ioapic)
+static int io_apic_get_version(int ioapic)
{
union IO_APIC_reg_01 reg_01;
unsigned long flags;
@@ -3702,6 +2726,7 @@ static struct resource * __init ioapic_setup_resources(void)
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
num++;
+ ioapics[i].iomem_res = res;
}
ioapic_resources = res;
@@ -3799,21 +2824,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
return gsi - gsi_cfg->gsi_base;
}
-static __init int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
- MAX_IO_APICS, nr_ioapics);
- return 1;
- }
- if (!address) {
- pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
-static __init int bad_ioapic_register(int idx)
+static int bad_ioapic_register(int idx)
{
union IO_APIC_reg_00 reg_00;
union IO_APIC_reg_01 reg_01;
@@ -3832,32 +2843,61 @@ static __init int bad_ioapic_register(int idx)
return 0;
}
-void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
- struct ioapic_domain_cfg *cfg)
+static int find_free_ioapic_entry(void)
{
- int idx = 0;
- int entries;
+ int idx;
+
+ for (idx = 0; idx < MAX_IO_APICS; idx++)
+ if (ioapics[idx].nr_registers == 0)
+ return idx;
+
+ return MAX_IO_APICS;
+}
+
+/**
+ * mp_register_ioapic - Register an IOAPIC device
+ * @id: hardware IOAPIC ID
+ * @address: physical address of IOAPIC register area
+ * @gsi_base: base of GSI associated with the IOAPIC
+ * @cfg: configuration information for the IOAPIC
+ */
+int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+ struct ioapic_domain_cfg *cfg)
+{
+ bool hotplug = !!ioapic_initialized;
struct mp_ioapic_gsi *gsi_cfg;
+ int idx, ioapic, entries;
+ u32 gsi_end;
- if (bad_ioapic(address))
- return;
+ if (!address) {
+ pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
+ return -EINVAL;
+ }
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].mp_config.apicaddr == address) {
+ pr_warn("address 0x%x conflicts with IOAPIC%d\n",
+ address, ioapic);
+ return -EEXIST;
+ }
- idx = nr_ioapics;
+ idx = find_free_ioapic_entry();
+ if (idx >= MAX_IO_APICS) {
+ pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
+ MAX_IO_APICS, idx);
+ return -ENOSPC;
+ }
ioapics[idx].mp_config.type = MP_IOAPIC;
ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
ioapics[idx].mp_config.apicaddr = address;
- ioapics[idx].irqdomain = NULL;
- ioapics[idx].irqdomain_cfg = *cfg;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-
if (bad_ioapic_register(idx)) {
clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
- return;
+ return -ENODEV;
}
- ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
+ ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
/*
@@ -3865,24 +2905,112 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
entries = io_apic_get_redir_entries(idx);
+ gsi_end = gsi_base + entries - 1;
+ for_each_ioapic(ioapic) {
+ gsi_cfg = mp_ioapic_gsi_routing(ioapic);
+ if ((gsi_base >= gsi_cfg->gsi_base &&
+ gsi_base <= gsi_cfg->gsi_end) ||
+ (gsi_end >= gsi_cfg->gsi_base &&
+ gsi_end <= gsi_cfg->gsi_end)) {
+ pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
+ gsi_base, gsi_end,
+ gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+ clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+ return -ENOSPC;
+ }
+ }
gsi_cfg = mp_ioapic_gsi_routing(idx);
gsi_cfg->gsi_base = gsi_base;
- gsi_cfg->gsi_end = gsi_base + entries - 1;
+ gsi_cfg->gsi_end = gsi_end;
+
+ ioapics[idx].irqdomain = NULL;
+ ioapics[idx].irqdomain_cfg = *cfg;
/*
- * The number of IO-APIC IRQ registers (== #pins):
+	 * If mp_register_ioapic() is called during the early boot stage,
+	 * while walking the ACPI/SFI/DT tables, it is too early to create
+	 * the irqdomain because the bootmem allocator is still in use, so
+	 * delay irqdomain creation to setup_IO_APIC().
*/
- ioapics[idx].nr_registers = entries;
+ if (hotplug) {
+ if (mp_irqdomain_create(idx)) {
+ clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+ return -ENOMEM;
+ }
+ alloc_ioapic_saved_registers(idx);
+ }
if (gsi_cfg->gsi_end >= gsi_top)
gsi_top = gsi_cfg->gsi_end + 1;
+ if (nr_ioapics <= idx)
+ nr_ioapics = idx + 1;
+
+ /* Set nr_registers to mark entry present */
+ ioapics[idx].nr_registers = entries;
pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
idx, mpc_ioapic_id(idx),
mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
gsi_cfg->gsi_base, gsi_cfg->gsi_end);
- nr_ioapics++;
+ return 0;
+}
+
+int mp_unregister_ioapic(u32 gsi_base)
+{
+ int ioapic, pin;
+ int found = 0;
+ struct mp_pin_info *pin_info;
+
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
+ found = 1;
+ break;
+ }
+ if (!found) {
+		pr_warn("can't find IOAPIC for GSI %u\n", gsi_base);
+ return -ENODEV;
+ }
+
+ for_each_pin(ioapic, pin) {
+ pin_info = mp_pin_info(ioapic, pin);
+ if (pin_info->count) {
+ pr_warn("pin%d on IOAPIC%d is still in use.\n",
+ pin, ioapic);
+ return -EBUSY;
+ }
+ }
+
+ /* Mark entry not present */
+ ioapics[ioapic].nr_registers = 0;
+ ioapic_destroy_irqdomain(ioapic);
+ free_ioapic_saved_registers(ioapic);
+ if (ioapics[ioapic].iomem_res)
+ release_resource(ioapics[ioapic].iomem_res);
+ clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
+ memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
+
+ return 0;
+}
+
+int mp_ioapic_registered(u32 gsi_base)
+{
+ int ioapic;
+
+ for_each_ioapic(ioapic)
+ if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
+ return 1;
+
+ return 0;
+}
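
For reference, a hypothetical caller sketch (the function name and its surrounding context are assumptions, not part of this change) showing how an ACPI-style hotplug path might combine the two interfaces above, probing with mp_ioapic_registered() and propagating the new error codes from mp_register_ioapic():

/* Hypothetical caller, kernel context assumed, for illustration only. */
static int example_add_ioapic(int id, u32 addr, u32 gsi_base,
			      struct ioapic_domain_cfg *cfg)
{
	int ret;

	if (mp_ioapic_registered(gsi_base))
		return 0;	/* this GSI range is already handled */

	ret = mp_register_ioapic(id, addr, gsi_base, cfg);
	if (ret)
		pr_warn("IOAPIC %d not registered: %d\n", id, ret);

	return ret;
}
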
+
+static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
+ int ioapic, int ioapic_pin,
+ int trigger, int polarity)
+{
+ irq_attr->ioapic = ioapic;
+ irq_attr->ioapic_pin = ioapic_pin;
+ irq_attr->trigger = trigger;
+ irq_attr->polarity = polarity;
}
int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
@@ -3931,7 +3059,7 @@ void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
ioapic_mask_entry(ioapic, pin);
__remove_pin_from_irq(cfg, ioapic, pin);
- WARN_ON(cfg->irq_2_pin != NULL);
+ WARN_ON(!list_empty(&cfg->irq_2_pin));
arch_teardown_hwirq(virq);
}
@@ -3964,18 +3092,6 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
return ret;
}
-bool mp_should_keep_irq(struct device *dev)
-{
- if (dev->power.is_prepared)
- return true;
-#ifdef CONFIG_PM
- if (dev->power.runtime_status == RPM_SUSPENDING)
- return true;
-#endif
-
- return false;
-}
-
/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
new file mode 100644
index 000000000000..d6ba2d660dc5
--- /dev/null
+++ b/arch/x86/kernel/apic/msi.c
@@ -0,0 +1,286 @@
+/*
+ * Support for MSI, HPET and DMAR interrupts.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/hpet.h>
+#include <linux/msi.h>
+#include <asm/msidef.h>
+#include <asm/hpet.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/irq_remapping.h>
+
+void native_compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+
+ if (x2apic_enabled())
+ msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
+
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ ((apic->irq_dest_mode == 0) ?
+ MSI_ADDR_DEST_MODE_PHYSICAL :
+ MSI_ADDR_DEST_MODE_LOGICAL) |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU :
+ MSI_ADDR_REDIRECTION_LOWPRI) |
+ MSI_ADDR_DEST_ID(dest);
+
+ msg->data =
+ MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED :
+ MSI_DATA_DELIVERY_LOWPRI) |
+ MSI_DATA_VECTOR(cfg->vector);
+}
+
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_cfg *cfg;
+ int err;
+ unsigned dest;
+
+ if (disable_apic)
+ return -ENXIO;
+
+ cfg = irq_cfg(irq);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
+
+ x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+
+ return 0;
+}
+
+static int
+msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct msi_msg msg;
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ __get_cached_msi_msg(data->msi_desc, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ __pci_write_msi_msg(data->msi_desc, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip msi_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset)
+{
+ struct irq_chip *chip = &msi_chip;
+ struct msi_msg msg;
+ unsigned int irq = irq_base + irq_offset;
+ int ret;
+
+ ret = msi_compose_msg(dev, irq, &msg, -1);
+ if (ret < 0)
+ return ret;
+
+ irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
+
+ /*
+	 * An MSI-X message is written per-IRQ, so the offset is always 0.
+	 * An MSI message denotes a contiguous group of IRQs and is written
+	 * for the 0th IRQ.
+ */
+ if (!irq_offset)
+ pci_write_msi_msg(irq, &msg);
+
+ setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+ irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+
+ dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq);
+
+ return 0;
+}
+
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *msidesc;
+ unsigned int irq;
+ int node, ret;
+
+ /* Multiple MSI vectors only supported with interrupt remapping */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ node = dev_to_node(&dev->dev);
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = irq_alloc_hwirq(node);
+ if (!irq)
+ return -ENOSPC;
+
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
+ if (ret < 0) {
+ irq_free_hwirq(irq);
+ return ret;
+ }
+
+ }
+ return 0;
+}
+
+void native_teardown_msi_irq(unsigned int irq)
+{
+ irq_free_hwirq(irq);
+}
+
+#ifdef CONFIG_DMAR_TABLE
+static int
+dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int dest, irq = data->irq;
+ struct msi_msg msg;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ dmar_msi_read(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+ msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
+
+ dmar_msi_write(irq, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip dmar_msi_type = {
+ .name = "DMAR_MSI",
+ .irq_unmask = dmar_msi_unmask,
+ .irq_mask = dmar_msi_mask,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = dmar_msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+ int ret;
+ struct msi_msg msg;
+
+ ret = msi_compose_msg(NULL, irq, &msg, -1);
+ if (ret < 0)
+ return ret;
+ dmar_msi_write(irq, &msg);
+ irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
+ return 0;
+}
+#endif
+
+/*
+ * HPET interrupt support
+ */
+#ifdef CONFIG_HPET_TIMER
+
+static int hpet_msi_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct msi_msg msg;
+ unsigned int dest;
+ int ret;
+
+ ret = apic_set_affinity(data, mask, &dest);
+ if (ret)
+ return ret;
+
+ hpet_msi_read(data->handler_data, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ hpet_msi_write(data->handler_data, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip hpet_msi_type = {
+ .name = "HPET_MSI",
+ .irq_unmask = hpet_msi_unmask,
+ .irq_mask = hpet_msi_mask,
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = hpet_msi_set_affinity,
+ .irq_retrigger = apic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int default_setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+ struct irq_chip *chip = &hpet_msi_type;
+ struct msi_msg msg;
+ int ret;
+
+ ret = msi_compose_msg(NULL, irq, &msg, id);
+ if (ret < 0)
+ return ret;
+
+ hpet_msi_write(irq_get_handler_data(irq), &msg);
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+ irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+ return 0;
+}
+#endif
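
native_compose_msi_msg() above just packs architectural bit-fields. A standalone, illustration-only sketch of the same layout with the constants written out (the shifts follow the published MSI address/data format; the kernel takes them from <asm/msidef.h>, and the vector and destination values here are assumptions):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vector = 0x41;	/* assumed example vector */
	uint32_t dest   = 0x02;	/* assumed example APIC destination ID */

	uint32_t address_lo = 0xfee00000u	/* MSI_ADDR_BASE_LO */
			    | (dest << 12)	/* MSI_ADDR_DEST_ID(dest) */
			    | (0u << 3)		/* redirection hint: CPU (fixed) */
			    | (0u << 2);	/* destination mode: physical */
	uint32_t data = (0u << 15)		/* trigger: edge */
		      | (1u << 14)		/* level: assert */
		      | (0u << 8)		/* delivery mode: fixed */
		      | vector;			/* MSI_DATA_VECTOR(vector) */

	printf("address_lo=0x%08x data=0x%08x\n", address_lo, data);
	return 0;
}

With the assumed values this prints address_lo=0xfee02000 data=0x00004041, i.e. the destination ID lands in bits 12-19 of the address and the vector in bits 0-7 of the data word.
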
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644
index 000000000000..6cedd7914581
--- /dev/null
+++ b/arch/x86/kernel/apic/vector.c
@@ -0,0 +1,719 @@
+/*
+ * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ * Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/irq_remapping.h>
+
+static DEFINE_RAW_SPINLOCK(vector_lock);
+
+void lock_vector_lock(void)
+{
+	/*
+	 * Used to ensure that the online set of cpus does not change
+	 * during assign_irq_vector.
+	 */
+ raw_spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+ raw_spin_unlock(&vector_lock);
+}
+
+struct irq_cfg *irq_cfg(unsigned int irq)
+{
+ return irq_get_chip_data(irq);
+}
+
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+{
+ return irq_data->chip_data;
+}
+
+static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
+{
+ struct irq_cfg *cfg;
+
+ cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
+ if (!cfg)
+ return NULL;
+ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
+ goto out_cfg;
+ if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+ goto out_domain;
+#ifdef CONFIG_X86_IO_APIC
+ INIT_LIST_HEAD(&cfg->irq_2_pin);
+#endif
+ return cfg;
+out_domain:
+ free_cpumask_var(cfg->domain);
+out_cfg:
+ kfree(cfg);
+ return NULL;
+}
+
+struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+{
+ int res = irq_alloc_desc_at(at, node);
+ struct irq_cfg *cfg;
+
+ if (res < 0) {
+ if (res != -EEXIST)
+ return NULL;
+ cfg = irq_cfg(at);
+ if (cfg)
+ return cfg;
+ }
+
+ cfg = alloc_irq_cfg(at, node);
+ if (cfg)
+ irq_set_chip_data(at, cfg);
+ else
+ irq_free_desc(at);
+ return cfg;
+}
+
+static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
+{
+ if (!cfg)
+ return;
+ irq_set_chip_data(at, NULL);
+ free_cpumask_var(cfg->domain);
+ free_cpumask_var(cfg->old_domain);
+ kfree(cfg);
+}
+
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+ /*
+ * NOTE! The local APIC isn't very good at handling
+ * multiple interrupts at the same interrupt level.
+ * As the interrupt level is determined by taking the
+ * vector number and shifting that right by 4, we
+ * want to spread these out a bit so that they don't
+ * all fall in the same interrupt level.
+ *
+ * Also, we've got to be careful not to trash gate
+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+ int cpu, err;
+ cpumask_var_t tmp_mask;
+
+ if (cfg->move_in_progress)
+ return -EBUSY;
+
+ if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+ return -ENOMEM;
+
+	/* Only try to allocate irqs on cpus that are present */
+ err = -ENOSPC;
+ cpumask_clear(cfg->old_domain);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+ int new_cpu, vector, offset;
+
+ apic->vector_allocation_domain(cpu, tmp_mask, mask);
+
+ if (cpumask_subset(tmp_mask, cfg->domain)) {
+ err = 0;
+ if (cpumask_equal(tmp_mask, cfg->domain))
+ break;
+ /*
+ * New cpumask using the vector is a proper subset of
+ * the current in use mask. So cleanup the vector
+ * allocation for the members that are not used anymore.
+ */
+ cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+ cfg->move_in_progress =
+ cpumask_intersects(cfg->old_domain, cpu_online_mask);
+ cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+ break;
+ }
+
+ vector = current_vector;
+ offset = current_offset;
+next:
+ vector += 16;
+ if (vector >= first_system_vector) {
+ offset = (offset + 1) % 16;
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+ if (unlikely(current_vector == vector)) {
+ cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+ cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+ cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+ continue;
+ }
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+ if (per_cpu(vector_irq, new_cpu)[vector] >
+ VECTOR_UNDEFINED)
+ goto next;
+ }
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+ if (cfg->vector) {
+ cpumask_copy(cfg->old_domain, cfg->domain);
+ cfg->move_in_progress =
+ cpumask_intersects(cfg->old_domain, cpu_online_mask);
+ }
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq;
+ cfg->vector = vector;
+ cpumask_copy(cfg->domain, tmp_mask);
+ err = 0;
+ break;
+ }
+ free_cpumask_var(tmp_mask);
+
+ return err;
+}
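
The walk above steps candidate vectors by 16, so successive candidates land in different interrupt priority levels (the level is vector >> 4); once a pass reaches first_system_vector, the offset rotates and the scan restarts from the bottom of the range at a different slot. A minimal user-space sketch of just that arithmetic, assuming illustrative values of 0x20 and 0xef for the boundary constants and a starting offset of 1:

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20	/* assumed value, for illustration */
#define FIRST_SYSTEM_VECTOR	0xef	/* assumed value, for illustration */

int main(void)
{
	/* VECTOR_OFFSET_START == 1 is an assumption for this sketch */
	int vector = FIRST_EXTERNAL_VECTOR + 1;
	int offset = 1 % 16;
	int i;

	for (i = 0; i < 16; i++) {
		vector += 16;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		/* vector >> 4 is the priority level of this candidate */
		printf("candidate 0x%02x, priority level %d\n",
		       vector, vector >> 4);
	}
	return 0;
}

Running it shows one candidate per level (0x31, 0x41, ... 0xe1) and then the wrap to 0x22, which is how the allocator spreads vectors so they do not pile up in one level.
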
+
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+ int err;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ err = __assign_irq_vector(irq, cfg, mask);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return err;
+}
+
+void clear_irq_vector(int irq, struct irq_cfg *cfg)
+{
+ int cpu, vector;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!cfg->vector);
+
+ vector = cfg->vector;
+ for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+
+ cfg->vector = 0;
+ cpumask_clear(cfg->domain);
+
+ if (likely(!cfg->move_in_progress)) {
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return;
+ }
+
+ for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] != irq)
+ continue;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+ break;
+ }
+ }
+ cfg->move_in_progress = 0;
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+int __init arch_probe_nr_irqs(void)
+{
+ int nr;
+
+ if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+ nr_irqs = NR_VECTORS * nr_cpu_ids;
+
+ nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+ /*
+ * for MSI and HT dyn irq
+ */
+ if (gsi_top <= NR_IRQS_LEGACY)
+ nr += 8 * nr_cpu_ids;
+ else
+ nr += gsi_top * 16;
+#endif
+ if (nr < nr_irqs)
+ nr_irqs = nr;
+
+ return nr_legacy_irqs();
+}
+
+int __init arch_early_irq_init(void)
+{
+ return arch_early_ioapic_init();
+}
+
+static void __setup_vector_irq(int cpu)
+{
+ /* Initialize vector_irq on a new cpu */
+ int irq, vector;
+ struct irq_cfg *cfg;
+
+ /*
+ * vector_lock will make sure that we don't run into irq vector
+ * assignments that might be happening on another cpu in parallel,
+	 * while we set up our initial vector-to-irq mappings.
+ */
+ raw_spin_lock(&vector_lock);
+	/* Mark the in-use vectors */
+ for_each_active_irq(irq) {
+ cfg = irq_cfg(irq);
+ if (!cfg)
+ continue;
+
+ if (!cpumask_test_cpu(cpu, cfg->domain))
+ continue;
+ vector = cfg->vector;
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+ /* Mark the free vectors */
+ for (vector = 0; vector < NR_VECTORS; ++vector) {
+ irq = per_cpu(vector_irq, cpu)[vector];
+ if (irq <= VECTOR_UNDEFINED)
+ continue;
+
+ cfg = irq_cfg(irq);
+ if (!cpumask_test_cpu(cpu, cfg->domain))
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+ }
+ raw_spin_unlock(&vector_lock);
+}
+
+/*
+ * Set up the vector-to-irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+ int irq;
+
+ /*
+	 * On most platforms the legacy PIC delivers interrupts to the boot
+	 * cpu, but on certain platforms PIC interrupts are delivered to
+	 * multiple cpus. If the legacy IRQ is handled by the legacy PIC,
+	 * set up the static legacy vector-to-irq mapping for the new cpu
+	 * that is coming online:
+ */
+ for (irq = 0; irq < nr_legacy_irqs(); irq++)
+ per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+
+ __setup_vector_irq(cpu);
+}
+
+int apic_retrigger_irq(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned long flags;
+ int cpu;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
+ apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+ return 1;
+}
+
+void apic_ack_edge(struct irq_data *data)
+{
+ irq_complete_move(irqd_cfg(data));
+ irq_move_irq(data);
+ ack_APIC_irq();
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ unsigned int irq = data->irq;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -EPERM;
+
+ if (!cpumask_intersects(mask, cpu_online_mask))
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
+
+ cpumask_copy(data->affinity, mask);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+ cpumask_var_t cleanup_mask;
+
+ if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+ unsigned int i;
+
+ for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+ apic->send_IPI_mask(cpumask_of(i),
+ IRQ_MOVE_CLEANUP_VECTOR);
+ } else {
+ cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+ apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ free_cpumask_var(cleanup_mask);
+ }
+ cfg->move_in_progress = 0;
+}
+
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+{
+ unsigned vector, me;
+
+ ack_APIC_irq();
+ irq_enter();
+ exit_idle();
+
+ me = smp_processor_id();
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ int irq;
+ unsigned int irr;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+
+ irq = __this_cpu_read(vector_irq[vector]);
+
+ if (irq <= VECTOR_UNDEFINED)
+ continue;
+
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
+ cfg = irq_cfg(irq);
+ if (!cfg)
+ continue;
+
+ raw_spin_lock(&desc->lock);
+
+ /*
+ * Check if the irq migration is in progress. If so, we
+ * haven't received the cleanup request yet for this irq.
+ */
+ if (cfg->move_in_progress)
+ goto unlock;
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ goto unlock;
+
+ irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+ /*
+		 * Check if the vector that needs to be cleaned up is
+		 * registered in the cpu's IRR. If so, this is not the
+		 * best time to clean it up. Clean it up in the next
+		 * attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
+		 * to myself.
+ */
+ if (irr & (1 << (vector % 32))) {
+ apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+
+ irq_exit();
+}
+
+static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+{
+ unsigned me;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ me = smp_processor_id();
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ send_cleanup_vector(cfg);
+}
+
+void irq_complete_move(struct irq_cfg *cfg)
+{
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ if (!cfg)
+ return;
+
+ __irq_complete_move(cfg, cfg->vector);
+}
+#endif
+
+/*
+ * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
+ */
+int arch_setup_hwirq(unsigned int irq, int node)
+{
+ struct irq_cfg *cfg;
+ unsigned long flags;
+ int ret;
+
+ cfg = alloc_irq_cfg(irq, node);
+ if (!cfg)
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+ if (!ret)
+ irq_set_chip_data(irq, cfg);
+ else
+ free_irq_cfg(irq, cfg);
+ return ret;
+}
+
+void arch_teardown_hwirq(unsigned int irq)
+{
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ free_remapped_irq(irq);
+ clear_irq_vector(irq, cfg);
+ free_irq_cfg(irq, cfg);
+}
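
As a usage sketch (hypothetical, for illustration): drivers do not call these arch hooks directly but go through the generic irq_alloc_hwirq()/irq_free_hwirq() wrappers, which end up in arch_setup_hwirq() and arch_teardown_hwirq() above:

/* Hypothetical driver-side usage of the dynamic hwirq interface. */
static int example_claim_hwirq(int node)
{
	unsigned int irq = irq_alloc_hwirq(node);	/* -> arch_setup_hwirq() */

	if (!irq)
		return -ENOSPC;

	/* ... request_irq()/free_irq() against 'irq' would go here ... */

	irq_free_hwirq(irq);				/* -> arch_teardown_hwirq() */
	return 0;
}
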
+
+static void __init print_APIC_field(int base)
+{
+ int i;
+
+ printk(KERN_DEBUG);
+
+ for (i = 0; i < 8; i++)
+ pr_cont("%08x", apic_read(base + i*0x10));
+
+ pr_cont("\n");
+}
+
+static void __init print_local_APIC(void *dummy)
+{
+ unsigned int i, v, ver, maxlvt;
+ u64 icr;
+
+ pr_debug("printing local APIC contents on CPU#%d/%d:\n",
+ smp_processor_id(), hard_smp_processor_id());
+ v = apic_read(APIC_ID);
+ pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
+ v = apic_read(APIC_LVR);
+ pr_info("... APIC VERSION: %08x\n", v);
+ ver = GET_APIC_VERSION(v);
+ maxlvt = lapic_get_maxlvt();
+
+ v = apic_read(APIC_TASKPRI);
+ pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+
+ /* !82489DX */
+ if (APIC_INTEGRATED(ver)) {
+ if (!APIC_XAPIC(ver)) {
+ v = apic_read(APIC_ARBPRI);
+ pr_debug("... APIC ARBPRI: %08x (%02x)\n",
+ v, v & APIC_ARBPRI_MASK);
+ }
+ v = apic_read(APIC_PROCPRI);
+ pr_debug("... APIC PROCPRI: %08x\n", v);
+ }
+
+ /*
+ * Remote read supported only in the 82489DX and local APIC for
+ * Pentium processors.
+ */
+ if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+ v = apic_read(APIC_RRR);
+ pr_debug("... APIC RRR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_LDR);
+ pr_debug("... APIC LDR: %08x\n", v);
+ if (!x2apic_enabled()) {
+ v = apic_read(APIC_DFR);
+ pr_debug("... APIC DFR: %08x\n", v);
+ }
+ v = apic_read(APIC_SPIV);
+ pr_debug("... APIC SPIV: %08x\n", v);
+
+ pr_debug("... APIC ISR field:\n");
+ print_APIC_field(APIC_ISR);
+ pr_debug("... APIC TMR field:\n");
+ print_APIC_field(APIC_TMR);
+ pr_debug("... APIC IRR field:\n");
+ print_APIC_field(APIC_IRR);
+
+ /* !82489DX */
+ if (APIC_INTEGRATED(ver)) {
+ /* Due to the Pentium erratum 3AP. */
+ if (maxlvt > 3)
+ apic_write(APIC_ESR, 0);
+
+ v = apic_read(APIC_ESR);
+ pr_debug("... APIC ESR: %08x\n", v);
+ }
+
+ icr = apic_icr_read();
+ pr_debug("... APIC ICR: %08x\n", (u32)icr);
+ pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));
+
+ v = apic_read(APIC_LVTT);
+ pr_debug("... APIC LVTT: %08x\n", v);
+
+ if (maxlvt > 3) {
+ /* PC is LVT#4. */
+ v = apic_read(APIC_LVTPC);
+ pr_debug("... APIC LVTPC: %08x\n", v);
+ }
+ v = apic_read(APIC_LVT0);
+ pr_debug("... APIC LVT0: %08x\n", v);
+ v = apic_read(APIC_LVT1);
+ pr_debug("... APIC LVT1: %08x\n", v);
+
+ if (maxlvt > 2) {
+ /* ERR is LVT#3. */
+ v = apic_read(APIC_LVTERR);
+ pr_debug("... APIC LVTERR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_TMICT);
+ pr_debug("... APIC TMICT: %08x\n", v);
+ v = apic_read(APIC_TMCCT);
+ pr_debug("... APIC TMCCT: %08x\n", v);
+ v = apic_read(APIC_TDCR);
+ pr_debug("... APIC TDCR: %08x\n", v);
+
+ if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+ v = apic_read(APIC_EFEAT);
+ maxlvt = (v >> 16) & 0xff;
+ pr_debug("... APIC EFEAT: %08x\n", v);
+ v = apic_read(APIC_ECTRL);
+ pr_debug("... APIC ECTRL: %08x\n", v);
+ for (i = 0; i < maxlvt; i++) {
+ v = apic_read(APIC_EILVTn(i));
+ pr_debug("... APIC EILVT%d: %08x\n", i, v);
+ }
+ }
+ pr_cont("\n");
+}
+
+static void __init print_local_APICs(int maxcpu)
+{
+ int cpu;
+
+ if (!maxcpu)
+ return;
+
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ if (cpu >= maxcpu)
+ break;
+ smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+ }
+ preempt_enable();
+}
+
+static void __init print_PIC(void)
+{
+ unsigned int v;
+ unsigned long flags;
+
+ if (!nr_legacy_irqs())
+ return;
+
+ pr_debug("\nprinting PIC contents\n");
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+ v = inb(0xa1) << 8 | inb(0x21);
+ pr_debug("... PIC IMR: %04x\n", v);
+
+ v = inb(0xa0) << 8 | inb(0x20);
+ pr_debug("... PIC IRR: %04x\n", v);
+
+ outb(0x0b, 0xa0);
+ outb(0x0b, 0x20);
+ v = inb(0xa0) << 8 | inb(0x20);
+ outb(0x0a, 0xa0);
+ outb(0x0a, 0x20);
+
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ pr_debug("... PIC ISR: %04x\n", v);
+
+ v = inb(0x4d1) << 8 | inb(0x4d0);
+ pr_debug("... PIC ELCR: %04x\n", v);
+}
+
+static int show_lapic __initdata = 1;
+static __init int setup_show_lapic(char *arg)
+{
+ int num = -1;
+
+ if (strcmp(arg, "all") == 0) {
+ show_lapic = CONFIG_NR_CPUS;
+ } else {
+ get_option(&arg, &num);
+ if (num >= 0)
+ show_lapic = num;
+ }
+
+ return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+static int __init print_ICs(void)
+{
+ if (apic_verbosity == APIC_QUIET)
+ return 0;
+
+ print_PIC();
+
+ /* don't print out if apic is not there */
+ if (!cpu_has_apic && !apic_from_smp_config())
+ return 0;
+
+ print_local_APICs(show_lapic);
+ print_IO_APICs();
+
+ return 0;
+}
+
+late_initcall(print_ICs);
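
Usage note for the parser above: print_ICs() stays silent while apic_verbosity is APIC_QUIET, so the dump is normally requested from the kernel command line together with a raised APIC verbosity, for example:

	apic=debug show_lapic=all	/* dump the local APIC of every online cpu */
	apic=debug show_lapic=2		/* dump cpus 0 and 1 only */

With no show_lapic= option the default of 1 prints only the boot cpu.
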
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 08f3fed2b0f2..10b8d3eaaf15 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
return box;
}
+/*
+ * Use the uncore_pmu_event_init pmu event_init callback
+ * as a detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+ return event->pmu->event_init == uncore_pmu_event_init;
+}
+
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
@@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
return -EINVAL;
n = box->n_events;
- box->event_list[n] = leader;
- n++;
+
+ if (is_uncore_event(leader)) {
+ box->event_list[n] = leader;
+ n++;
+ }
+
if (!dogrp)
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (event->state <= PERF_EVENT_STATE_OFF)
+ if (!is_uncore_event(event) ||
+ event->state <= PERF_EVENT_STATE_OFF)
continue;
if (n >= max_count)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f5ab56d14287..aceb2f90c716 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -28,6 +28,7 @@
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1cf7c97ff175..000d4199b03e 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -732,10 +732,10 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
- .if vector < NR_VECTORS
+ .if vector < FIRST_SYSTEM_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4
.endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 90878aa38dbd..9ebaf63ba182 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -740,10 +740,10 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
- .if vector < NR_VECTORS
+ .if vector < FIRST_SYSTEM_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -8
.endif
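
Both entry stubs emit jump targets in 32-byte blocks of 7, so the .rept count is the ceiling of the stub count divided by 7, written with the usual (n + 6) / 7 integer idiom; the change only shrinks the covered range from NR_VECTORS down to FIRST_SYSTEM_VECTOR. A quick check of the arithmetic in C, with illustrative vector values rather than the real constants:

#include <stdio.h>

int main(void)
{
	/* Illustrative values; the real ones live in asm/irq_vectors.h. */
	int first_external = 0x20;	/* FIRST_EXTERNAL_VECTOR */
	int first_system = 0xef;	/* FIRST_SYSTEM_VECTOR, example value */
	int n = first_system - first_external;

	/* (n + 6) / 7 is integer ceiling division by 7. */
	printf("%d stubs -> %d blocks of 7\n", n, (n + 6) / 7);
	return 0;
}
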
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 4de73ee78361..70e181ea1eac 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -99,32 +99,9 @@ void __init init_IRQ(void)
x86_init.irqs.intr_init();
}
-/*
- * Setup the vector to irq mappings.
- */
-void setup_vector_irq(int cpu)
-{
-#ifndef CONFIG_X86_IO_APIC
- int irq;
-
- /*
- * On most of the platforms, legacy PIC delivers the interrupts on the
- * boot cpu. But there are certain platforms where PIC interrupts are
- * delivered to multiple cpu's. If the legacy IRQ is handled by the
- * legacy PIC, for the new cpu that is coming online, setup the static
- * legacy vector to irq mapping:
- */
- for (irq = 0; irq < nr_legacy_irqs(); irq++)
- per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
-#endif
-
- __setup_vector_irq(cpu);
-}
-
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
@@ -144,7 +121,6 @@ static void __init smp_intr_init(void)
/* IPI used for rebooting/stopping */
alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
-#endif
#endif /* CONFIG_SMP */
}
@@ -159,7 +135,7 @@ static void __init apic_intr_init(void)
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+#ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
@@ -197,10 +173,17 @@ void __init native_init_IRQ(void)
* 'special' SMP interrupts)
*/
i = FIRST_EXTERNAL_VECTOR;
- for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
+#ifndef CONFIG_X86_LOCAL_APIC
+#define first_system_vector NR_VECTORS
+#endif
+ for_each_clear_bit_from(i, used_vectors, first_system_vector) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
}
+#ifdef CONFIG_X86_LOCAL_APIC
+ for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
+ set_intr_gate(i, spurious_interrupt);
+#endif
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
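
After this change native_init_IRQ() installs device-interrupt gates only below first_system_vector and points every remaining free vector at spurious_interrupt. for_each_clear_bit_from() walks the zero bits of used_vectors starting at the current index; a minimal userspace approximation of that walk, with hand-rolled bitmap helpers in place of the kernel's:

#include <stdio.h>

#define NR_VECTORS	256
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long used_vectors[NR_VECTORS / BITS_PER_LONG];

/* Userspace stand-ins for the kernel's bitmap primitives. */
static int test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	int i, installed = 0;

	set_bit(0x80, used_vectors);	/* e.g. a syscall vector, already taken */

	/* ~ for_each_clear_bit_from(i, used_vectors, NR_VECTORS) */
	for (i = 0x20; i < NR_VECTORS; i++) {
		if (test_bit(i, used_vectors))
			continue;
		installed++;	/* set_intr_gate(i, ...) would go here */
	}
	printf("installed %d gates, skipped the set bit\n", installed);
	return 0;
}
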
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 72e8e310258d..469b23d6acc2 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -20,6 +20,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/cacheflush.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 485981059a40..415480d3ea84 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 17962e667a91..bae6c609888e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7a8f5845e8eb..6d7022c683e3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1084,7 +1084,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int i;
- preempt_disable();
smp_cpu_index_default();
/*
@@ -1102,22 +1101,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
-
if (smp_sanity_check(max_cpus) < 0) {
pr_info("SMP disabled\n");
disable_smp();
- goto out;
+ return;
}
default_setup_apic_routing();
- preempt_disable();
if (read_apic_id() != boot_cpu_physical_apicid) {
panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
read_apic_id(), boot_cpu_physical_apicid);
/* Or can we switch back to PIC here? */
}
- preempt_enable();
connect_bsp_APIC();
@@ -1151,8 +1147,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
uv_system_init();
set_mtrr_aps_delayed_init();
-out:
- preempt_enable();
}
void arch_enable_nonboot_cpus_begin(void)
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 3e551eee87b9..4e942f31b1a7 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -55,12 +55,6 @@ static bool tls_desc_okay(const struct user_desc *info)
if (info->seg_not_present)
return false;
-#ifdef CONFIG_X86_64
- /* The L bit makes no sense for data. */
- if (info->lm)
- return false;
-#endif
-
return true;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a9ae20579895..88900e288021 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -331,7 +331,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
break; /* Success, it was handled */
case 1: /* Bound violation. */
info = mpx_generate_siginfo(regs, xsave_buf);
- if (PTR_ERR(info)) {
+ if (IS_ERR(info)) {
/*
* We failed to decode the MPX instruction. Act as if
* the exception was not caused by MPX.
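
The fix above addresses a classic ERR_PTR pitfall: PTR_ERR() merely casts the pointer back to a long, so it is nonzero for nearly any pointer, valid or not, while IS_ERR() actually tests whether the value encodes an errno. A simplified standalone rendering of the idiom, modeled on include/linux/err.h:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }

/* Only the topmost 4095 values of the address space encode errors. */
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int x = 42;
	void *good = &x;
	void *bad = ERR_PTR(-EINVAL);

	/* PTR_ERR(good) is nonzero too -- exactly the original bug. */
	printf("good: IS_ERR=%d PTR_ERR=%ld\n", IS_ERR(good), PTR_ERR(good));
	printf("bad:  IS_ERR=%d PTR_ERR=%ld\n", IS_ERR(bad), PTR_ERR(bad));
	return 0;
}
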
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index aae94132bc24..c1c1544b8485 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -841,7 +841,7 @@ static void __init lguest_init_IRQ(void)
{
unsigned int i;
- for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+ for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
/* Some systems map "vectors" to interrupts weirdly. Not us! */
__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
if (i != SYSCALL_VECTOR)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index b9958c364075..44b9271580b5 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -210,6 +210,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
int polarity;
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
+
if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
polarity = 0; /* active high */
else
@@ -224,13 +227,18 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0)
return -EBUSY;
+ dev->irq_managed = 1;
+
return 0;
}
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0)
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
+ dev->irq_managed = 0;
+ }
}
struct pci_ops intel_mid_pci_ops = {
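
The new dev->irq_managed flag makes the enable/disable pair idempotent: enable becomes a no-op once the IRQ is mapped, and disable only tears down a mapping this code created. A minimal sketch of the guard pattern, with an illustrative struct in place of struct pci_dev:

#include <stdio.h>
#include <stdbool.h>

struct dev {
	int irq;
	bool irq_managed;
};

static int map_irq(struct dev *d)
{
	if (d->irq_managed && d->irq > 0)
		return 0;		/* already mapped: do nothing */
	/* ... allocate and map the IRQ here ... */
	d->irq_managed = true;
	return 0;
}

static void unmap_irq(struct dev *d)
{
	if (!d->irq_managed || d->irq <= 0)
		return;			/* not ours to tear down */
	/* ... unmap the IRQ here ... */
	d->irq_managed = false;
}

int main(void)
{
	struct dev d = { .irq = 5 };

	map_irq(&d);
	map_irq(&d);			/* second call is a no-op */
	unmap_irq(&d);
	unmap_irq(&d);			/* safe: the guard short-circuits */
	printf("irq_managed=%d\n", d.irq_managed);
	return 0;
}
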
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index eb500c2592ad..5dc6ca5e1741 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1200,11 +1200,12 @@ static int pirq_enable_irq(struct pci_dev *dev)
#ifdef CONFIG_X86_IO_APIC
struct pci_dev *temp_dev;
int irq;
- struct io_apic_irq_attr irq_attr;
+
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
- PCI_SLOT(dev->devfn),
- pin - 1, &irq_attr);
+ PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus,
@@ -1218,7 +1219,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
pin = pci_swizzle_interrupt_pin(dev, pin);
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn),
- pin - 1, &irq_attr);
+ pin - 1);
if (irq >= 0)
dev_warn(&dev->dev, "using bridge %s "
"INT %c to get IRQ %d\n",
@@ -1228,6 +1229,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
+ dev->irq_managed = 1;
dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
@@ -1254,11 +1256,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
- dev->irq) {
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
+ dev->irq_managed = 0;
}
}
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index b233681af4de..0ce673645432 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
- struct irq_cfg *cfg = irq_get_chip_data(irq);
+ struct irq_cfg *cfg = irq_cfg(irq);
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode, err;
@@ -198,13 +198,13 @@ static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_cfg *cfg = data->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
unsigned int dest;
unsigned long mmr_value, mmr_offset;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode;
- if (__ioapic_set_affinity(data, mask, &dest))
+ if (apic_set_affinity(data, mask, &dest))
return -1;
mmr_value = 0;