Diffstat (limited to 'arch')
507 files changed, 13730 insertions, 7521 deletions
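Most of what follows is mechanical API migration rather than new logic. One change that recurs across the architectures shown here is the futex backend: futex_atomic_op_inuser() now takes a u32 __user * operand, and futex_atomic_cmpxchg_inatomic() returns 0 or -EFAULT and hands the loaded value back through a separate u32 *uval argument instead of returning it, so callers can tell a fault apart from a legitimately read value. The fragment below is a hedged sketch of a caller written against that new contract; example_futex_trylock() and its -EAGAIN retry convention are invented for illustration and are not part of this diff.

/*
 * Sketch only: assumes kernel context with <asm/futex.h> (or the
 * generic futex header path) already included.  Shows the shape of
 * the new cmpxchg contract introduced by this series.
 */
static int example_futex_trylock(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 curval;
	int ret;

	/* Returns 0 on success, -EFAULT if the user page was inaccessible. */
	ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);
	if (ret)
		return ret;

	/* The word changed under us: let the caller re-read and retry. */
	if (curval != expected)
		return -EAGAIN;

	return 0;
}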
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h index 70145cbb21cb..1b71ca70c9f6 100644 --- a/arch/alpha/include/asm/fcntl.h +++ b/arch/alpha/include/asm/fcntl.h @@ -31,6 +31,8 @@ #define __O_SYNC 020000000 #define O_SYNC (__O_SYNC|O_DSYNC) +#define O_PATH 040000000 + #define F_GETLK 7 #define F_SETLK 8 #define F_SETLKW 9 diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index 945de222ab91..e8a761aee088 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -29,7 +29,7 @@ : "r" (uaddr), "r"(oparg) \ : "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( __ASM_SMP_MB - "1: ldl_l %0,0(%2)\n" - " cmpeq %0,%3,%1\n" - " beq %1,3f\n" - " mov %4,%1\n" - "2: stl_c %1,0(%2)\n" - " beq %1,4f\n" + "1: ldl_l %1,0(%3)\n" + " cmpeq %1,%4,%2\n" + " beq %2,3f\n" + " mov %5,%2\n" + "2: stl_c %2,0(%3)\n" + " beq %2,4f\n" "3: .subsection 2\n" "4: br 1b\n" " .previous\n" @@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .long 2b-.\n" " lda $31,3b-2b(%0)\n" " .previous\n" - : "=&r"(prev), "=&r"(cmp) + : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)oldval), "r"(newval) : "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h index 034b6cf5d9f3..80e1cee90f1f 100644 --- a/arch/alpha/include/asm/ioctls.h +++ b/arch/alpha/include/asm/ioctls.h @@ -94,6 +94,7 @@ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h index 1570c0b54336..a83bbea62c67 100644 --- a/arch/alpha/include/asm/rwsem.h +++ b/arch/alpha/include/asm/rwsem.h @@ -13,44 +13,13 @@ #ifdef __KERNEL__ #include <linux/compiler.h> -#include <linux/list.h> -#include <linux/spinlock.h> -struct rwsem_waiter; - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* - * the semaphore definition - */ -struct rw_semaphore { - long count; #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L #define 
RWSEM_ACTIVE_BIAS 0x0000000000000001L #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL #define RWSEM_WAITING_BIAS (-0x0000000100000000L) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -}; - -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} static inline void __down_read(struct rw_semaphore *sem) { @@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) #endif } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _ALPHA_RWSEM_H */ diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index fe698b5045e9..376f22130791 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -230,44 +230,24 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; } -static int -do_osf_statfs(struct path *path, struct osf_statfs __user *buffer, - unsigned long bufsiz) +SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, + struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; - int error = vfs_statfs(path, &linux_stat); + int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } -SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, - struct osf_statfs __user *, buffer, unsigned long, bufsiz) -{ - struct path path; - int retval; - - retval = user_path(pathname, &path); - if (!retval) { - retval = do_osf_statfs(&path, buffer, bufsiz); - path_put(&path); - } - return retval; -} - SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { - struct file *file; - int retval; - - retval = -EBADF; - file = fget(fd); - if (file) { - retval = do_osf_statfs(&file->f_path, buffer, bufsiz); - fput(file); - } - return retval; + struct kstatfs linux_stat; + int error = fd_statfs(fd, &linux_stat); + if (!error) + error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); + return error; } /* diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index f6c108a3d673..8c13a0c77830 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -149,6 +149,7 @@ static int titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { + unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cpu_set_irq_affinity(irq - 16, *affinity); titan_update_irq_hw(titan_cached_irq_mask); diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index c1f3e7cb82a4..a58e84f1a63b 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ irqreturn_t timer_interrupt(int irq, void *dev) { @@ -172,8 +172,6 @@ irqreturn_t timer_interrupt(int irq, void 
*dev) profile_tick(CPU_PROFILING); #endif - write_seqlock(&xtime_lock); - /* * Calculate how many ticks have passed since the last update, * including any previous partial leftover. Save any resulting @@ -187,9 +185,7 @@ irqreturn_t timer_interrupt(int irq, void *dev) nticks = delta >> FIX_SHIFT; if (nticks) - do_timer(nticks); - - write_sequnlock(&xtime_lock); + xtime_update(nticks); if (test_irq_work_pending()) { clear_irq_work_pending(); diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 003ef4c02585..433be2a24f31 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -1,5 +1,6 @@ #include <asm-generic/vmlinux.lds.h> #include <asm/thread_info.h> +#include <asm/cache.h> #include <asm/page.h> OUTPUT_FORMAT("elf64-alpha") @@ -38,7 +39,7 @@ SECTIONS __init_begin = ALIGN(PAGE_SIZE); INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) - PERCPU(PAGE_SIZE) + PERCPU(L1_CACHE_BYTES, PAGE_SIZE) /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page needed for the THREAD_SIZE aligned init_task gets freed after init */ . = ALIGN(THREAD_SIZE); @@ -46,7 +47,7 @@ SECTIONS /* Freed after init ends here */ _data = .; - RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .got : { *(.got) diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig new file mode 100644 index 000000000000..7a9267e5da55 --- /dev/null +++ b/arch/arm/configs/tegra_defconfig @@ -0,0 +1,123 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_TEGRA=y +CONFIG_MACH_HARMONY=y +CONFIG_TEGRA_DEBUG_UARTD=y +CONFIG_ARM_ERRATA_742230=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_SMP=y +CONFIG_NR_CPUS=2 +CONFIG_PREEMPT=y +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_HIGHMEM=y +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_VFP=y +CONFIG_PM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_WIRELESS is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_MISC_DEVICES=y +CONFIG_AD525X_DPOT=y +CONFIG_AD525X_DPOT_I2C=y +CONFIG_ICS932S401=y +CONFIG_APDS9802ALS=y +CONFIG_ISL29003=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +# CONFIG_HWMON is not set +# CONFIG_MFD_SUPPORT is not set +# CONFIG_USB_SUPPORT is not 
set +CONFIG_MMC=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_DNOTIFY is not set +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_SLAB=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_SPINLOCK_SLEEP=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_SG=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DEBUG_LL=y +CONFIG_EARLY_PRINTK=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index b33fe7065b38..199a6b6de7f4 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -35,7 +35,7 @@ : "cc", "memory") static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); /* implies preempt_disable() */ @@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int val; + int ret = 0; + u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - pagefault_disable(); /* implies preempt_disable() */ - __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " T(ldr) " %0, [%3]\n" - " teq %0, %1\n" + "1: " T(ldr) " %1, [%4]\n" + " teq %1, %2\n" " it eq @ explicit IT needed for the 2b label\n" - "2: " T(streq) " %2, [%3]\n" + "2: " T(streq) " %3, [%4]\n" "3:\n" " .pushsection __ex_table,\"a\"\n" " .align 3\n" " .long 1b, 4f, 2b, 4f\n" " .popsection\n" " .pushsection .fixup,\"ax\"\n" - "4: mov %0, %4\n" + "4: mov %0, %5\n" " b 3b\n" " .popsection" - : "=&r" (val) + : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); - pagefault_enable(); /* subsumes preempt_enable() */ - - return val; + *uval = val; + return ret; } #endif /* !SMP */ diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 3d76bf233734..1ff46cabc7ef 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -107,9 +107,7 @@ void timer_tick(void) { profile_tick(CPU_PROFILING); do_leds(); - write_seqlock(&xtime_lock); - do_timer(1); - write_sequnlock(&xtime_lock); + xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index dfbb377e251d..b4348e62ef06 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -82,7 +82,7 @@ SECTIONS #endif } - PERCPU(PAGE_SIZE) + 
PERCPU(32, PAGE_SIZE) #ifndef CONFIG_XIP_KERNEL . = ALIGN(PAGE_SIZE); diff --git a/arch/arm/mach-clps711x/include/mach/time.h b/arch/arm/mach-clps711x/include/mach/time.h index 8fe283ccd1f3..61fef9129c6a 100644 --- a/arch/arm/mach-clps711x/include/mach/time.h +++ b/arch/arm/mach-clps711x/include/mach/time.h @@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id) { struct pt_regs *regs = get_irq_regs(); do_leds(); - do_timer(1); + xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(regs)); #endif diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 1c0c2b02d870..64dc4176407b 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -229,7 +229,7 @@ usbfs-$(CONFIG_ARCH_OMAP_OTG) := usb-fs.o obj-y += $(usbfs-m) $(usbfs-y) obj-y += usb-musb.o obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o -obj-y += usb-ehci.o +obj-y += usb-host.o onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o obj-y += $(onenand-m) $(onenand-y) diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index d4e41ef86aa5..7542ba59f2b8 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -653,11 +653,11 @@ static void enable_board_wakeup_source(void) OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = 57, @@ -816,7 +816,7 @@ static void __init omap_3430sdp_init(void) board_flash_init(sdp_flash_partitions, chip_sel_3430); sdp3430_display_init(); enable_board_wakeup_source(); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); } MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board") diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c index 62645640f5e4..deed2db32c53 100644 --- a/arch/arm/mach-omap2/board-3630sdp.c +++ b/arch/arm/mach-omap2/board-3630sdp.c @@ -54,11 +54,11 @@ static void enable_board_wakeup_source(void) OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = 126, @@ -211,7 +211,7 @@ static void __init omap_sdp_init(void) board_smc91x_init(); board_flash_init(sdp_flash_partitions, chip_sel_sdp); enable_board_wakeup_source(); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); } MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board") diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 07d1b20b1148..f603f3b04cb8 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -44,7 +44,6 @@ #define ETH_KS8851_IRQ 34 #define ETH_KS8851_POWER_ON 48 #define ETH_KS8851_QUART 138 -#define OMAP4SDP_MDM_PWR_EN_GPIO 157 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_ENABLE_GPIO 188 @@ 
-251,16 +250,6 @@ static void __init omap_4430sdp_init_irq(void) gic_init_irq(); } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .phy_reset = false, - .reset_gpio_port[0] = -EINVAL, - .reset_gpio_port[1] = -EINVAL, - .reset_gpio_port[2] = -EINVAL, -}; - static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_UTMI, .mode = MUSB_OTG, @@ -272,6 +261,7 @@ static struct twl4030_usb_data omap4_usbphy_data = { .phy_exit = omap4430_phy_exit, .phy_power = omap4430_phy_power, .phy_set_clock = omap4430_phy_set_clk, + .phy_suspend = omap4430_phy_suspend, }; static struct omap2_hsmmc_info mmc[] = { @@ -576,14 +566,6 @@ static void __init omap_4430sdp_init(void) omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); - /* Power on the ULPI PHY */ - status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3"); - if (status) - pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__); - else - gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1); - - usb_ehci_init(&ehci_pdata); usb_musb_init(&musb_board_data); status = omap_ethernet_init(); diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c index 71acb5ab281c..e3a194f6b13f 100644 --- a/arch/arm/mach-omap2/board-am3517crane.c +++ b/arch/arm/mach-omap2/board-am3517crane.c @@ -59,10 +59,10 @@ static void __init am3517_crane_init_irq(void) omap_init_irq(); } -static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static struct usbhs_omap_board_data usbhs_bdata __initdata = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = GPIO_USB_NRESET, @@ -103,7 +103,7 @@ static void __init am3517_crane_init(void) return; } - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); } MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD") diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c index 10d60b7743cf..913538ad17d8 100644 --- a/arch/arm/mach-omap2/board-am3517evm.c +++ b/arch/arm/mach-omap2/board-am3517evm.c @@ -430,15 +430,15 @@ static __init void am3517_evm_musb_init(void) usb_musb_init(&musb_board_data); } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, #else - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, #endif - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = 57, @@ -502,7 +502,7 @@ static void __init am3517_evm_init(void) /* Configure GPIO for EHCI port */ omap_mux_init_gpio(57, OMAP_PIN_OUTPUT); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); am3517_evm_hecc_init(&am3517_evm_hecc_pdata); /* DSS */ am3517_evm_display_init(); diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 
dac141610666..9be7289cbb56 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -605,10 +605,10 @@ static struct omap2_hsmmc_info mmc[] = { {} /* Terminator */ }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static struct usbhs_omap_board_data usbhs_bdata __initdata = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = OMAP_MAX_GPIO_LINES + 6, @@ -810,7 +810,7 @@ static void __init cm_t35_init(void) cm_t35_init_display(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); } MACHINE_START(CM_T35, "Compulab CM-T35") diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c index 8f9a64d650ee..8e18dc76b11e 100644 --- a/arch/arm/mach-omap2/board-cm-t3517.c +++ b/arch/arm/mach-omap2/board-cm-t3517.c @@ -167,10 +167,10 @@ static inline void cm_t3517_init_rtc(void) {} #define HSUSB2_RESET_GPIO (147) #define USB_HUB_RESET_GPIO (152) -static struct ehci_hcd_omap_platform_data cm_t3517_ehci_pdata __initdata = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = HSUSB1_RESET_GPIO, @@ -192,7 +192,7 @@ static int cm_t3517_init_usbh(void) msleep(1); } - usb_ehci_init(&cm_t3517_ehci_pdata); + usbhs_init(&cm_t3517_ehci_pdata); return 0; } diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 9a2a31e011ce..bc0141b98694 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -620,11 +620,11 @@ static struct omap_musb_board_data musb_board_data = { .power = 100, }; -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -803,7 +803,7 @@ static void __init devkit8000_init(void) devkit8000_ads7846_init(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); devkit8000_flash_init(); /* Ensure SDRC pins are mux'd for self-refresh */ diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 3be85a1f55f4..f9f534419311 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -627,10 +627,10 @@ static struct omap_musb_board_data musb_board_data = { .power = 100, }; -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = 
OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET, @@ -699,7 +699,7 @@ static void __init igep2_init(void) platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices)); omap_serial_init(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); igep2_flash_init(); igep2_leds_init(); diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c index 4dc62a9b9cb2..579fc2d2525f 100644 --- a/arch/arm/mach-omap2/board-igep0030.c +++ b/arch/arm/mach-omap2/board-igep0030.c @@ -408,10 +408,10 @@ static void __init igep3_wifi_bt_init(void) void __init igep3_wifi_bt_init(void) {} #endif -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -435,7 +435,7 @@ static void __init igep3_init(void) platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices)); omap_serial_init(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); igep3_flash_init(); igep3_leds_init(); diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 46d814ab5656..f0963b6e4627 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -586,11 +586,11 @@ static void __init omap3beagle_flash_init(void) } } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -625,7 +625,7 @@ static void __init omap3_beagle_init(void) gpio_direction_output(170, true); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); omap3beagle_flash_init(); /* Ensure SDRC pins are mux'd for self-refresh */ diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 323c3809ce39..38a2d91790c0 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -638,11 +638,11 @@ static struct platform_device *omap3_evm_devices[] __initdata = { &omap3_evm_dss_device, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { +static struct usbhs_omap_board_data usbhs_bdata __initdata = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, /* PHY reset GPIO will be runtime programmed based on EVM version */ @@ -700,7 +700,7 @@ static void __init omap3_evm_init(void) /* setup EHCI phy reset config */ omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); - ehci_pdata.reset_gpio_port[1] = 21; + usbhs_bdata.reset_gpio_port[1] = 21; /* EVM REV >= 
E can supply 500mA with EXTVBUS programming */ musb_board_data.power = 500; @@ -708,10 +708,10 @@ static void __init omap3_evm_init(void) } else { /* setup EHCI phy reset on MDC */ omap_mux_init_gpio(135, OMAP_PIN_OUTPUT); - ehci_pdata.reset_gpio_port[1] = 135; + usbhs_bdata.reset_gpio_port[1] = 135; } usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); ads7846_dev_init(); omap3evm_init_smsc911x(); omap3_evm_display_init(); diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 0b34beded11f..aa05f2e46a61 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -681,11 +681,11 @@ static struct platform_device *omap3pandora_devices[] __initdata = { &pandora_vwlan_device, }; -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = 16, @@ -716,7 +716,7 @@ static void __init omap3pandora_init(void) spi_register_board_info(omap3pandora_spi_board_info, ARRAY_SIZE(omap3pandora_spi_board_info)); omap3pandora_ads7846_init(); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); usb_musb_init(&musb_board_data); gpmc_nand_init(&pandora_nand_data); diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index 2a2dad447e86..f6c87787cd4f 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -608,10 +608,10 @@ static struct platform_device *omap3_stalker_devices[] __initdata = { &keys_gpio, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -649,7 +649,7 @@ static void __init omap3_stalker_init(void) omap_serial_init(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); ads7846_dev_init(); omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index db1f74fe6c4f..84cfddb19a74 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -468,11 +468,11 @@ static void __init omap3touchbook_flash_init(void) } } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -527,7 +527,7 @@ static void __init omap3_touchbook_init(void) ARRAY_SIZE(omap3_ads7846_spi_board_info)); omap3_ads7846_init(); 
usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); omap3touchbook_flash_init(); /* Ensure SDRC pins are mux'd for self-refresh */ diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e944025d5ef8..ed61c1f5d5e6 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -83,10 +83,10 @@ static void __init omap4_panda_init_irq(void) gic_init_irq(); } -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = false, .reset_gpio_port[0] = -EINVAL, .reset_gpio_port[1] = -EINVAL, @@ -128,7 +128,7 @@ static void __init omap4_ehci_init(void) gpio_set_value(GPIO_HUB_NRESET, 0); gpio_set_value(GPIO_HUB_NRESET, 1); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); /* enable power to hub */ gpio_set_value(GPIO_HUB_POWER, 1); @@ -153,6 +153,7 @@ static struct twl4030_usb_data omap4_usbphy_data = { .phy_exit = omap4430_phy_exit, .phy_power = omap4430_phy_power, .phy_set_clock = omap4430_phy_set_clk, + .phy_suspend = omap4430_phy_suspend, }; static struct omap2_hsmmc_info mmc[] = { diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index cb26e5d8268d..08770ccec0f3 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -423,10 +423,10 @@ static struct platform_device *overo_devices[] __initdata = { &overo_lcd_device, }; -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, @@ -454,7 +454,7 @@ static void __init overo_init(void) omap_serial_init(); overo_flash_init(); usb_musb_init(&musb_board_data); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); overo_ads7846_init(); overo_init_smsc911x(); diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c index e26754c24ee8..1dd195afa396 100644 --- a/arch/arm/mach-omap2/board-zoom.c +++ b/arch/arm/mach-omap2/board-zoom.c @@ -106,10 +106,10 @@ static struct mtd_partition zoom_nand_partitions[] = { }, }; -static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { - .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, - .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, - .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, +static const struct usbhs_omap_board_data usbhs_bdata __initconst = { + .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, + .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, + .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, .reset_gpio_port[1] = ZOOM3_EHCI_RESET_GPIO, @@ -123,7 +123,7 @@ static void __init omap_zoom_init(void) } else if (machine_is_omap_zoom3()) { omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT); - usb_ehci_init(&ehci_pdata); + usbhs_init(&usbhs_bdata); } 
board_nand_init(zoom_nand_partitions, diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index 403a4a1d3f9c..fbb1e30a73dc 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -3286,7 +3286,7 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX), CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX), CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX), @@ -3322,7 +3322,7 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX), CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX), CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX), CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX), @@ -3368,11 +3368,20 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX), CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX), CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), - CLK("ehci-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), + CLK("usbhs-omap.0", "utmi_p1_gfclk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "utmi_p2_gfclk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX), + CLK("usbhs-omap.0", "init_60m_fclk", &dummy_ck, CK_3XXX), CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX), CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX), CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX), diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index de9ec8ddd2ae..46fd3f674cac 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c @@ -3197,7 +3197,7 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "uart3_fck", &uart3_fck, CK_443X), CLK(NULL, "uart4_fck", &uart4_fck, CK_443X), 
CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X), - CLK("ehci-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X), + CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X), CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X), CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X), CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X), @@ -3209,8 +3209,8 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X), CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X), CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X), - CLK("ehci-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X), - CLK("ehci-omap.0", "usbhost_ick", &dummy_ck, CK_443X), + CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X), + CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X), CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X), CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X), CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X), @@ -3219,8 +3219,8 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X), CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X), CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X), - CLK("ehci-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X), - CLK("ehci-omap.0", "usbtll_fck", &dummy_ck, CK_443X), + CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X), + CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X), CLK(NULL, "usim_ck", &usim_ck, CK_443X), CLK(NULL, "usim_fclk", &usim_fclk, CK_443X), CLK(NULL, "usim_fck", &usim_fck, CK_443X), diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index 745252c60e32..ebe33df708bd 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c @@ -43,6 +43,7 @@ static struct clk *phyclk, *clk48m, *clk32k; static void __iomem *ctrl_base; +static int usbotghs_control; int omap4430_phy_init(struct device *dev) { @@ -103,13 +104,6 @@ int omap4430_phy_set_clk(struct device *dev, int on) int omap4430_phy_power(struct device *dev, int ID, int on) { if (on) { - /* enabled the clocks */ - omap4430_phy_set_clk(dev, 1); - /* power on the phy */ - if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) { - __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF); - mdelay(200); - } if (ID) /* enable VBUS valid, IDDIG groung */ __raw_writel(AVALID | VBUSVALID, ctrl_base + @@ -125,10 +119,31 @@ int omap4430_phy_power(struct device *dev, int ID, int on) /* Enable session END and IDIG to high impedence. 
*/ __raw_writel(SESSEND | IDDIG, ctrl_base + USBOTGHS_CONTROL); + } + return 0; +} + +int omap4430_phy_suspend(struct device *dev, int suspend) +{ + if (suspend) { /* Disable the clocks */ omap4430_phy_set_clk(dev, 0); /* Power down the phy */ __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); + + /* save the context */ + usbotghs_control = __raw_readl(ctrl_base + USBOTGHS_CONTROL); + } else { + /* Enable the internel phy clcoks */ + omap4430_phy_set_clk(dev, 1); + /* power on the phy */ + if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) { + __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF); + mdelay(200); + } + + /* restore the context */ + __raw_writel(usbotghs_control, ctrl_base + USBOTGHS_CONTROL); } return 0; diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-host.c index 25eeadabc39b..89ae29847c59 100644 --- a/arch/arm/mach-omap2/usb-ehci.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -1,14 +1,15 @@ /* - * linux/arch/arm/mach-omap2/usb-ehci.c + * usb-host.c - OMAP USB Host * * This file will contain the board specific details for the - * Synopsys EHCI host controller on OMAP3430 + * Synopsys EHCI/OHCI host controller on OMAP3430 and onwards * - * Copyright (C) 2007 Texas Instruments + * Copyright (C) 2007-2011 Texas Instruments * Author: Vikram Pandita <vikram.pandita@ti.com> + * Author: Keshava Munegowda <keshava_mgowda@ti.com> * * Generalization by: - * Felipe Balbi <felipe.balbi@nokia.com> + * Felipe Balbi <balbi@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,7 +20,7 @@ #include <linux/errno.h> #include <linux/delay.h> #include <linux/platform_device.h> -#include <linux/clk.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <asm/io.h> @@ -30,44 +31,56 @@ #include "mux.h" -#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE) +#ifdef CONFIG_MFD_OMAP_USB_HOST -static struct resource ehci_resources[] = { +#define OMAP_USBHS_DEVICE "usbhs-omap" + +static struct resource usbhs_resources[] = { + { + .name = "uhh", + .flags = IORESOURCE_MEM, + }, { + .name = "tll", .flags = IORESOURCE_MEM, }, { + .name = "ehci", .flags = IORESOURCE_MEM, }, { + .name = "ehci-irq", + .flags = IORESOURCE_IRQ, + }, + { + .name = "ohci", .flags = IORESOURCE_MEM, }, - { /* general IRQ */ - .flags = IORESOURCE_IRQ, + { + .name = "ohci-irq", + .flags = IORESOURCE_IRQ, } }; -static u64 ehci_dmamask = ~(u32)0; -static struct platform_device ehci_device = { - .name = "ehci-omap", - .id = 0, - .dev = { - .dma_mask = &ehci_dmamask, - .coherent_dma_mask = 0xffffffff, - .platform_data = NULL, - }, - .num_resources = ARRAY_SIZE(ehci_resources), - .resource = ehci_resources, +static struct platform_device usbhs_device = { + .name = OMAP_USBHS_DEVICE, + .id = 0, + .num_resources = ARRAY_SIZE(usbhs_resources), + .resource = usbhs_resources, }; +static struct usbhs_omap_platform_data usbhs_data; +static struct ehci_hcd_omap_platform_data ehci_data; +static struct ohci_hcd_omap_platform_data ohci_data; + /* MUX settings for EHCI pins */ /* * setup_ehci_io_mux - initialize IO pad mux for USBHOST */ -static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) +static void setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { - case EHCI_HCD_OMAP_MODE_PHY: + case OMAP_EHCI_PORT_MODE_PHY: omap_mux_init_signal("hsusb1_stp", OMAP_PIN_OUTPUT); omap_mux_init_signal("hsusb1_clk", OMAP_PIN_OUTPUT); 
omap_mux_init_signal("hsusb1_dir", OMAP_PIN_INPUT_PULLDOWN); @@ -81,7 +94,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("hsusb1_data6", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("hsusb1_data7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_TLL: + case OMAP_EHCI_PORT_MODE_TLL: omap_mux_init_signal("hsusb1_tll_stp", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hsusb1_tll_clk", @@ -107,14 +120,14 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("hsusb1_tll_data7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_UNKNOWN: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; } switch (port_mode[1]) { - case EHCI_HCD_OMAP_MODE_PHY: + case OMAP_EHCI_PORT_MODE_PHY: omap_mux_init_signal("hsusb2_stp", OMAP_PIN_OUTPUT); omap_mux_init_signal("hsusb2_clk", OMAP_PIN_OUTPUT); omap_mux_init_signal("hsusb2_dir", OMAP_PIN_INPUT_PULLDOWN); @@ -136,7 +149,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("hsusb2_data7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_TLL: + case OMAP_EHCI_PORT_MODE_TLL: omap_mux_init_signal("hsusb2_tll_stp", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hsusb2_tll_clk", @@ -162,17 +175,17 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("hsusb2_tll_data7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_UNKNOWN: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; } switch (port_mode[2]) { - case EHCI_HCD_OMAP_MODE_PHY: + case OMAP_EHCI_PORT_MODE_PHY: printk(KERN_WARNING "Port3 can't be used in PHY mode\n"); break; - case EHCI_HCD_OMAP_MODE_TLL: + case OMAP_EHCI_PORT_MODE_TLL: omap_mux_init_signal("hsusb3_tll_stp", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hsusb3_tll_clk", @@ -198,7 +211,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("hsusb3_tll_data7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_UNKNOWN: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; @@ -207,10 +220,10 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) return; } -static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) +static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { - case EHCI_HCD_OMAP_MODE_PHY: + case OMAP_EHCI_PORT_MODE_PHY: omap_mux_init_signal("usbb1_ulpiphy_stp", OMAP_PIN_OUTPUT); omap_mux_init_signal("usbb1_ulpiphy_clk", @@ -236,7 +249,7 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("usbb1_ulpiphy_dat7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_TLL: + case OMAP_EHCI_PORT_MODE_TLL: omap_mux_init_signal("usbb1_ulpitll_stp", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("usbb1_ulpitll_clk", @@ -262,12 +275,12 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("usbb1_ulpitll_dat7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_UNKNOWN: + case OMAP_USBHS_PORT_MODE_UNUSED: default: break; } switch (port_mode[1]) { - case EHCI_HCD_OMAP_MODE_PHY: + case OMAP_EHCI_PORT_MODE_PHY: omap_mux_init_signal("usbb2_ulpiphy_stp", OMAP_PIN_OUTPUT); omap_mux_init_signal("usbb2_ulpiphy_clk", @@ -293,7 +306,7 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("usbb2_ulpiphy_dat7", 
OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_TLL: + case OMAP_EHCI_PORT_MODE_TLL: omap_mux_init_signal("usbb2_ulpitll_stp", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("usbb2_ulpitll_clk", @@ -319,90 +332,13 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) omap_mux_init_signal("usbb2_ulpitll_dat7", OMAP_PIN_INPUT_PULLDOWN); break; - case EHCI_HCD_OMAP_MODE_UNKNOWN: + case OMAP_USBHS_PORT_MODE_UNUSED: default: break; } } -void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) -{ - platform_device_add_data(&ehci_device, pdata, sizeof(*pdata)); - - /* Setup Pin IO MUX for EHCI */ - if (cpu_is_omap34xx()) { - ehci_resources[0].start = OMAP34XX_EHCI_BASE; - ehci_resources[0].end = OMAP34XX_EHCI_BASE + SZ_1K - 1; - ehci_resources[1].start = OMAP34XX_UHH_CONFIG_BASE; - ehci_resources[1].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1; - ehci_resources[2].start = OMAP34XX_USBTLL_BASE; - ehci_resources[2].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1; - ehci_resources[3].start = INT_34XX_EHCI_IRQ; - setup_ehci_io_mux(pdata->port_mode); - } else if (cpu_is_omap44xx()) { - ehci_resources[0].start = OMAP44XX_HSUSB_EHCI_BASE; - ehci_resources[0].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1; - ehci_resources[1].start = OMAP44XX_UHH_CONFIG_BASE; - ehci_resources[1].end = OMAP44XX_UHH_CONFIG_BASE + SZ_2K - 1; - ehci_resources[2].start = OMAP44XX_USBTLL_BASE; - ehci_resources[2].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1; - ehci_resources[3].start = OMAP44XX_IRQ_EHCI; - setup_4430ehci_io_mux(pdata->port_mode); - } - - if (platform_device_register(&ehci_device) < 0) { - printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n"); - return; - } -} - -#else - -void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) - -{ -} - -#endif /* CONFIG_USB_EHCI_HCD */ - -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) - -static struct resource ohci_resources[] = { - { - .start = OMAP34XX_OHCI_BASE, - .end = OMAP34XX_OHCI_BASE + SZ_1K - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = OMAP34XX_UHH_CONFIG_BASE, - .end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = OMAP34XX_USBTLL_BASE, - .end = OMAP34XX_USBTLL_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - { /* general IRQ */ - .start = INT_34XX_OHCI_IRQ, - .flags = IORESOURCE_IRQ, - } -}; - -static u64 ohci_dmamask = DMA_BIT_MASK(32); - -static struct platform_device ohci_device = { - .name = "ohci-omap3", - .id = 0, - .dev = { - .dma_mask = &ohci_dmamask, - .coherent_dma_mask = 0xffffffff, - }, - .num_resources = ARRAY_SIZE(ohci_resources), - .resource = ohci_resources, -}; - -static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) +static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) { switch (port_mode[0]) { case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: @@ -430,7 +366,7 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) omap_mux_init_signal("mm1_txdat", OMAP_PIN_INPUT_PULLDOWN); break; - case OMAP_OHCI_PORT_MODE_UNUSED: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; @@ -461,7 +397,7 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) omap_mux_init_signal("mm2_txdat", OMAP_PIN_INPUT_PULLDOWN); break; - case OMAP_OHCI_PORT_MODE_UNUSED: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; @@ -492,31 +428,147 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) 
omap_mux_init_signal("mm3_txdat", OMAP_PIN_INPUT_PULLDOWN); break; - case OMAP_OHCI_PORT_MODE_UNUSED: + case OMAP_USBHS_PORT_MODE_UNUSED: /* FALLTHROUGH */ default: break; } } -void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata) +static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) { - platform_device_add_data(&ohci_device, pdata, sizeof(*pdata)); + switch (port_mode[0]) { + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM: + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM: + omap_mux_init_signal("usbb1_mm_rxdp", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usbb1_mm_rxdm", + OMAP_PIN_INPUT_PULLDOWN); - /* Setup Pin IO MUX for OHCI */ - if (cpu_is_omap34xx()) + case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM: + case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM: + omap_mux_init_signal("usbb1_mm_rxrcv", + OMAP_PIN_INPUT_PULLDOWN); + + case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0: + omap_mux_init_signal("usbb1_mm_txen", + OMAP_PIN_INPUT_PULLDOWN); + + + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM: + omap_mux_init_signal("usbb1_mm_txdat", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usbb1_mm_txse0", + OMAP_PIN_INPUT_PULLDOWN); + break; + + case OMAP_USBHS_PORT_MODE_UNUSED: + default: + break; + } + + switch (port_mode[1]) { + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM: + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM: + omap_mux_init_signal("usbb2_mm_rxdp", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usbb2_mm_rxdm", + OMAP_PIN_INPUT_PULLDOWN); + + case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM: + case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM: + omap_mux_init_signal("usbb2_mm_rxrcv", + OMAP_PIN_INPUT_PULLDOWN); + + case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0: + omap_mux_init_signal("usbb2_mm_txen", + OMAP_PIN_INPUT_PULLDOWN); + + + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0: + case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM: + omap_mux_init_signal("usbb2_mm_txdat", + OMAP_PIN_INPUT_PULLDOWN); + omap_mux_init_signal("usbb2_mm_txse0", + OMAP_PIN_INPUT_PULLDOWN); + break; + + case OMAP_USBHS_PORT_MODE_UNUSED: + default: + break; + } +} + +void __init usbhs_init(const struct usbhs_omap_board_data *pdata) +{ + int i; + + for (i = 0; i < OMAP3_HS_USB_PORTS; i++) { + usbhs_data.port_mode[i] = pdata->port_mode[i]; + ohci_data.port_mode[i] = pdata->port_mode[i]; + ehci_data.port_mode[i] = pdata->port_mode[i]; + ehci_data.reset_gpio_port[i] = pdata->reset_gpio_port[i]; + ehci_data.regulator[i] = pdata->regulator[i]; + } + ehci_data.phy_reset = pdata->phy_reset; + ohci_data.es2_compatibility = pdata->es2_compatibility; + usbhs_data.ehci_data = &ehci_data; + usbhs_data.ohci_data = &ohci_data; + + if (cpu_is_omap34xx()) { + usbhs_resources[0].start = OMAP34XX_UHH_CONFIG_BASE; + usbhs_resources[0].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1; + usbhs_resources[1].start = OMAP34XX_USBTLL_BASE; + usbhs_resources[1].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1; + usbhs_resources[2].start = OMAP34XX_EHCI_BASE; + usbhs_resources[2].end = OMAP34XX_EHCI_BASE + SZ_1K - 1; + usbhs_resources[3].start = INT_34XX_EHCI_IRQ; + usbhs_resources[4].start = OMAP34XX_OHCI_BASE; + usbhs_resources[4].end = OMAP34XX_OHCI_BASE + SZ_1K - 1; + usbhs_resources[5].start = INT_34XX_OHCI_IRQ; + setup_ehci_io_mux(pdata->port_mode); 
setup_ohci_io_mux(pdata->port_mode); + } else if (cpu_is_omap44xx()) { + usbhs_resources[0].start = OMAP44XX_UHH_CONFIG_BASE; + usbhs_resources[0].end = OMAP44XX_UHH_CONFIG_BASE + SZ_1K - 1; + usbhs_resources[1].start = OMAP44XX_USBTLL_BASE; + usbhs_resources[1].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1; + usbhs_resources[2].start = OMAP44XX_HSUSB_EHCI_BASE; + usbhs_resources[2].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1; + usbhs_resources[3].start = OMAP44XX_IRQ_EHCI; + usbhs_resources[4].start = OMAP44XX_HSUSB_OHCI_BASE; + usbhs_resources[4].end = OMAP44XX_HSUSB_OHCI_BASE + SZ_1K - 1; + usbhs_resources[5].start = OMAP44XX_IRQ_OHCI; + setup_4430ehci_io_mux(pdata->port_mode); + setup_4430ohci_io_mux(pdata->port_mode); + } - if (platform_device_register(&ohci_device) < 0) { - pr_err("Unable to register FS-USB (OHCI) device\n"); - return; + if (platform_device_add_data(&usbhs_device, + &usbhs_data, sizeof(usbhs_data)) < 0) { + printk(KERN_ERR "USBHS platform_device_add_data failed\n"); + goto init_end; } + + if (platform_device_register(&usbhs_device) < 0) + printk(KERN_ERR "USBHS platform_device_register failed\n"); + +init_end: + return; } #else -void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata) +void __init usbhs_init(const struct usbhs_omap_board_data *pdata) { } -#endif /* CONFIG_USB_OHCI_HCD */ +#endif + + diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 5298949d4b11..241fc94b4116 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -214,6 +214,10 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data) if (platform_device_register(&musb_device) < 0) printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); + + if (cpu_is_omap44xx()) + omap4430_phy_init(dev); + } #else diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c index 1a81fe12ccd7..1e93f176c1de 100644 --- a/arch/arm/mach-s3c2410/mach-h1940.c +++ b/arch/arm/mach-s3c2410/mach-h1940.c @@ -162,29 +162,10 @@ struct gpio_chip h1940_latch_gpiochip = { .get = h1940_gpiolib_latch_get, }; -static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd) -{ - printk(KERN_DEBUG "udc: pullup(%d)\n",cmd); - - switch (cmd) - { - case S3C2410_UDC_P_ENABLE : - gpio_set_value(H1940_LATCH_USB_DP, 1); - break; - case S3C2410_UDC_P_DISABLE : - gpio_set_value(H1940_LATCH_USB_DP, 0); - break; - case S3C2410_UDC_P_RESET : - break; - default: - break; - } -} - static struct s3c2410_udc_mach_info h1940_udc_cfg __initdata = { - .udc_command = h1940_udc_pullup, .vbus_pin = S3C2410_GPG(5), .vbus_pin_inverted = 1, + .pullup_pin = H1940_LATCH_USB_DP, }; static struct s3c2410_ts_mach_info h1940_ts_cfg __initdata = { @@ -475,9 +456,6 @@ static void __init h1940_init(void) gpio_direction_output(H1940_LATCH_LCD_P4, 0); gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0); - gpio_request(H1940_LATCH_USB_DP, "USB pullup"); - gpio_direction_output(H1940_LATCH_USB_DP, 0); - gpio_request(H1940_LATCH_SD_POWER, "SD power"); gpio_direction_output(H1940_LATCH_SD_POWER, 0); diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c index 271b9aa6d40a..66f44440d5d3 100644 --- a/arch/arm/mach-s3c2410/mach-n30.c +++ b/arch/arm/mach-s3c2410/mach-n30.c @@ -84,26 +84,10 @@ static struct s3c2410_uartcfg n30_uartcfgs[] = { }, }; -static void n30_udc_pullup(enum s3c2410_udc_cmd_e cmd) -{ - switch (cmd) { - case S3C2410_UDC_P_ENABLE : - gpio_set_value(S3C2410_GPB(3), 1); - break; - case S3C2410_UDC_P_DISABLE : - 
gpio_set_value(S3C2410_GPB(3), 0); - break; - case S3C2410_UDC_P_RESET : - break; - default: - break; - } -} - static struct s3c2410_udc_mach_info n30_udc_cfg __initdata = { - .udc_command = n30_udc_pullup, .vbus_pin = S3C2410_GPG(1), .vbus_pin_inverted = 0, + .pullup_pin = S3C2410_GPB(3), }; static struct gpio_keys_button n30_buttons[] = { @@ -596,9 +580,6 @@ static void __init n30_init(void) platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices)); } - - WARN_ON(gpio_request(S3C2410_GPB(3), "udc pup")); - gpio_direction_output(S3C2410_GPB(3), 0); } MACHINE_START(N30, "Acer-N30") diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c index 8e5758bdd666..834cfb61bcfe 100644 --- a/arch/arm/mach-s3c2412/mach-smdk2413.c +++ b/arch/arm/mach-s3c2412/mach-smdk2413.c @@ -78,28 +78,9 @@ static struct s3c2410_uartcfg smdk2413_uartcfgs[] __initdata = { } }; -static void smdk2413_udc_pullup(enum s3c2410_udc_cmd_e cmd) -{ - printk(KERN_DEBUG "udc: pullup(%d)\n",cmd); - - switch (cmd) - { - case S3C2410_UDC_P_ENABLE : - gpio_set_value(S3C2410_GPF(2), 1); - break; - case S3C2410_UDC_P_DISABLE : - gpio_set_value(S3C2410_GPF(2), 0); - break; - case S3C2410_UDC_P_RESET : - break; - default: - break; - } -} - static struct s3c2410_udc_mach_info smdk2413_udc_cfg __initdata = { - .udc_command = smdk2413_udc_pullup, + .pullup_pin = S3C2410_GPF(2), }; @@ -133,9 +114,6 @@ static void __init smdk2413_machine_init(void) { /* Turn off suspend on both USB ports, and switch the * selectable USB port to USB device mode. */ - WARN_ON(gpio_request(S3C2410_GPF(2), "udc pull")); - gpio_direction_output(S3C2410_GPF(2), 0); - s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST | S3C2410_MISCCR_USBSUSPND0 | S3C2410_MISCCR_USBSUSPND1, 0x0); diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 9f2c14ec7181..37405d9abe32 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c @@ -455,28 +455,10 @@ static struct s3c2410_platform_nand __initdata gta02_nand_info = { }; -static void gta02_udc_command(enum s3c2410_udc_cmd_e cmd) -{ - switch (cmd) { - case S3C2410_UDC_P_ENABLE: - pr_debug("%s S3C2410_UDC_P_ENABLE\n", __func__); - gpio_direction_output(GTA02_GPIO_USB_PULLUP, 1); - break; - case S3C2410_UDC_P_DISABLE: - pr_debug("%s S3C2410_UDC_P_DISABLE\n", __func__); - gpio_direction_output(GTA02_GPIO_USB_PULLUP, 0); - break; - case S3C2410_UDC_P_RESET: - pr_debug("%s S3C2410_UDC_P_RESET\n", __func__); - /* FIXME: Do something here. */ - } -} - /* Get PMU to set USB current limit accordingly. 
*/ -static struct s3c2410_udc_mach_info gta02_udc_cfg = { +static struct s3c2410_udc_mach_info gta02_udc_cfg __initdata = { .vbus_draw = gta02_udc_vbus_draw, - .udc_command = gta02_udc_command, - + .pullup_pin = GTA02_GPIO_USB_PULLUP, }; /* USB */ diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index f62bb4c793bd..d80f129bca94 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c @@ -97,26 +97,8 @@ static struct s3c2410_uartcfg mini2440_uartcfgs[] __initdata = { /* USB device UDC support */ -static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd) -{ - pr_debug("udc: pullup(%d)\n", cmd); - - switch (cmd) { - case S3C2410_UDC_P_ENABLE : - gpio_set_value(S3C2410_GPC(5), 1); - break; - case S3C2410_UDC_P_DISABLE : - gpio_set_value(S3C2410_GPC(5), 0); - break; - case S3C2410_UDC_P_RESET : - break; - default: - break; - } -} - static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = { - .udc_command = mini2440_udc_pullup, + .pullup_pin = S3C2410_GPC(5), }; @@ -644,10 +626,6 @@ static void __init mini2440_init(void) s3c2410_gpio_setpin(S3C2410_GPB(1), 0); s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT); - /* Make sure the D+ pullup pin is output */ - WARN_ON(gpio_request(S3C2410_GPC(5), "udc pup")); - gpio_direction_output(S3C2410_GPC(5), 0); - /* mark the key as input, without pullups (there is one on the board) */ for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) { s3c_gpio_setpull(mini2440_buttons[i].gpio, S3C_GPIO_PULL_UP); diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c index eab6ae50683c..86bbc233b31c 100644 --- a/arch/arm/mach-s3c2440/mach-rx1950.c +++ b/arch/arm/mach-s3c2440/mach-rx1950.c @@ -566,26 +566,10 @@ static struct s3c2410_platform_nand rx1950_nand_info = { .sets = rx1950_nand_sets, }; -static void rx1950_udc_pullup(enum s3c2410_udc_cmd_e cmd) -{ - switch (cmd) { - case S3C2410_UDC_P_ENABLE: - gpio_direction_output(S3C2410_GPJ(5), 1); - break; - case S3C2410_UDC_P_DISABLE: - gpio_direction_output(S3C2410_GPJ(5), 0); - break; - case S3C2410_UDC_P_RESET: - break; - default: - break; - } -} - static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = { - .udc_command = rx1950_udc_pullup, .vbus_pin = S3C2410_GPG(5), .vbus_pin_inverted = 1, + .pullup_pin = S3C2410_GPJ(5), }; static struct s3c2410_ts_mach_info rx1950_ts_cfg __initdata = { @@ -750,9 +734,6 @@ static void __init rx1950_init_machine(void) S3C2410_MISCCR_USBSUSPND0 | S3C2410_MISCCR_USBSUSPND1, 0x0); - WARN_ON(gpio_request(S3C2410_GPJ(5), "UDC pullup")); - gpio_direction_output(S3C2410_GPJ(5), 0); - /* mmc power is disabled by default */ WARN_ON(gpio_request(S3C2410_GPJ(1), "MMC power")); gpio_direction_output(S3C2410_GPJ(1), 0); diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index acd9552f8ada..622a9ec1ff08 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -10,6 +10,9 @@ config ARCH_TEGRA_2x_SOC select CPU_V7 select ARM_GIC select ARCH_REQUIRE_GPIOLIB + select USB_ARCH_HAS_EHCI if USB_SUPPORT + select USB_ULPI if USB_SUPPORT + select USB_ULPI_VIEWPORT if USB_SUPPORT help Support for NVIDIA Tegra AP20 and T20 processors, based on the ARM CortexA9MP CPU and the ARM PL310 L2 cache controller @@ -27,6 +30,31 @@ config MACH_HARMONY help Support for nVidia Harmony development platform +config MACH_KAEN + bool "Kaen board" + select MACH_SEABOARD + help + Support for the Kaen version of Seaboard + +config MACH_SEABOARD + bool "Seaboard 
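Editor's sketch: the S3C2410 boards above (h1940, n30, smdk2413, gta02, and mini2440/rx1950 just below) all drop their hand-rolled udc_command() pull-up callbacks and the gpio_request()/gpio_direction_output() calls from their init functions, and instead declare the D+ pull-up GPIO as .pullup_pin so the UDC driver can drive it itself. A minimal board fragment in the new style, for an imaginary board; the GPIO numbers are placeholders and the s3c24xx_udc_set_platdata() helper is assumed from the usual Samsung platform code rather than shown in these hunks:

        #include <linux/gpio.h>
        #include <plat/udc.h>           /* s3c2410_udc_mach_info */

        static struct s3c2410_udc_mach_info myboard_udc_cfg __initdata = {
                /* VBUS sense input, active-low on this imaginary board */
                .vbus_pin               = S3C2410_GPG(5),
                .vbus_pin_inverted      = 1,
                /* D+ pull-up GPIO; the UDC driver now requests and drives it */
                .pullup_pin             = S3C2410_GPB(9),
        };

        static void __init myboard_init(void)
        {
                /*
                 * No per-board udc_command callback and no manual
                 * gpio_request()/gpio_direction_output() for the pull-up
                 * any more: declaring pullup_pin is enough.
                 */
                s3c24xx_udc_set_platdata(&myboard_udc_cfg);
        }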
board" + help + Support for nVidia Seaboard development platform. It will + also be included for some of the derivative boards that + have large similarities with the seaboard design. + +config MACH_TRIMSLICE + bool "TrimSlice board" + select TEGRA_PCI + help + Support for CompuLab TrimSlice platform + +config MACH_WARIO + bool "Wario board" + select MACH_SEABOARD + help + Support for the Wario version of Seaboard + choice prompt "Low-level debug console UART" default TEGRA_DEBUG_UART_NONE @@ -58,4 +86,7 @@ config TEGRA_SYSTEM_DMA Adds system DMA functionality for NVIDIA Tegra SoCs, used by several Tegra device drivers +config TEGRA_EMC_SCALING_ENABLE + bool "Enable scaling the memory frequency" + endif diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index cdbc68e4c0ca..9f7a7e1e0c38 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -1,21 +1,30 @@ obj-y += common.o +obj-y += devices.o obj-y += io.o obj-y += irq.o legacy_irq.o obj-y += clock.o obj-y += timer.o obj-y += gpio.o obj-y += pinmux.o +obj-y += powergate.o obj-y += fuse.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o -obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o obj-$(CONFIG_TEGRA_PCI) += pcie.o +obj-$(CONFIG_USB_SUPPORT) += usb_phy.o obj-${CONFIG_MACH_HARMONY} += board-harmony.o obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o + +obj-${CONFIG_MACH_SEABOARD} += board-seaboard.o +obj-${CONFIG_MACH_SEABOARD} += board-seaboard-pinmux.o + +obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice.o +obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice-pinmux.o diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c index 50b15d500cac..98368d947be3 100644 --- a/arch/arm/mach-tegra/board-harmony-pinmux.c +++ b/arch/arm/mach-tegra/board-harmony-pinmux.c @@ -15,8 +15,10 @@ */ #include <linux/kernel.h> +#include <linux/gpio.h> #include <mach/pinmux.h> +#include "gpio-names.h" #include "board-harmony.h" static struct tegra_pingroup_config harmony_pinmux[] = { @@ -34,10 +36,10 @@ static struct tegra_pingroup_config harmony_pinmux[] = { {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, - {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, - {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, - {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, @@ -138,7 +140,18 @@ static struct 
tegra_pingroup_config harmony_pinmux[] = { {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, }; +static struct tegra_gpio_table gpio_table[] = { + { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */ + { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */ + { .gpio = TEGRA_GPIO_PT3, .enable = true }, /* mmc2 pwr */ + { .gpio = TEGRA_GPIO_PH2, .enable = true }, /* mmc4 cd */ + { .gpio = TEGRA_GPIO_PH3, .enable = true }, /* mmc4 wp */ + { .gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc4 pwr */ +}; + void harmony_pinmux_init(void) { tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux)); + + tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); } diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c index b9dbdb1289d0..49224e936eb4 100644 --- a/arch/arm/mach-tegra/board-harmony.c +++ b/arch/arm/mach-tegra/board-harmony.c @@ -30,35 +30,13 @@ #include <mach/iomap.h> #include <mach/irqs.h> +#include <mach/sdhci.h> #include "board.h" #include "board-harmony.h" #include "clock.h" - -/* NVidia bootloader tags */ -#define ATAG_NVIDIA 0x41000801 - -#define ATAG_NVIDIA_RM 0x1 -#define ATAG_NVIDIA_DISPLAY 0x2 -#define ATAG_NVIDIA_FRAMEBUFFER 0x3 -#define ATAG_NVIDIA_CHIPSHMOO 0x4 -#define ATAG_NVIDIA_CHIPSHMOOPHYS 0x5 -#define ATAG_NVIDIA_PRESERVED_MEM_0 0x10000 -#define ATAG_NVIDIA_PRESERVED_MEM_N 2 -#define ATAG_NVIDIA_FORCE_32 0x7fffffff - -struct tag_tegra { - __u32 bootarg_key; - __u32 bootarg_len; - char bootarg[1]; -}; - -static int __init parse_tag_nvidia(const struct tag *tag) -{ - - return 0; -} -__tagtable(ATAG_NVIDIA, parse_tag_nvidia); +#include "devices.h" +#include "gpio-names.h" static struct plat_serial8250_port debug_uart_platform_data[] = { { @@ -84,6 +62,9 @@ static struct platform_device debug_uart = { static struct platform_device *harmony_devices[] __initdata = { &debug_uart, + &tegra_sdhci_device1, + &tegra_sdhci_device2, + &tegra_sdhci_device4, }; static void __init tegra_harmony_fixup(struct machine_desc *desc, @@ -102,22 +83,45 @@ static __initdata struct tegra_clk_init_table harmony_clk_init_table[] = { { NULL, NULL, 0, 0}, }; + +static struct tegra_sdhci_platform_data sdhci_pdata1 = { + .cd_gpio = -1, + .wp_gpio = -1, + .power_gpio = -1, +}; + +static struct tegra_sdhci_platform_data sdhci_pdata2 = { + .cd_gpio = TEGRA_GPIO_PI5, + .wp_gpio = TEGRA_GPIO_PH1, + .power_gpio = TEGRA_GPIO_PT3, +}; + +static struct tegra_sdhci_platform_data sdhci_pdata4 = { + .cd_gpio = TEGRA_GPIO_PH2, + .wp_gpio = TEGRA_GPIO_PH3, + .power_gpio = TEGRA_GPIO_PI6, + .is_8bit = 1, +}; + static void __init tegra_harmony_init(void) { - tegra_common_init(); - tegra_clk_init_from_table(harmony_clk_init_table); harmony_pinmux_init(); + tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; + tegra_sdhci_device2.dev.platform_data = &sdhci_pdata2; + tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; + platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices)); } MACHINE_START(HARMONY, "harmony") .boot_params = 0x00000100, .fixup = tegra_harmony_fixup, - .init_irq = tegra_init_irq, - .init_machine = tegra_harmony_init, .map_io = tegra_map_common_io, + .init_early = tegra_init_early, + .init_irq = tegra_init_irq, .timer = &tegra_timer, + .init_machine = tegra_harmony_init, MACHINE_END diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c new file mode 100644 index 000000000000..2d6ad83ed4b2 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c @@ -0,0 
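Editor's sketch: the Harmony changes above follow one pattern per SDHCI controller: describe its card-detect, write-protect and power GPIOs in tegra_sdhci_platform_data (with -1 meaning "no such line", as in sdhci_pdata1), list the same pins in the board's tegra_gpio_table so they stay in GPIO mode, attach the platform data to the static device, and register it with platform_add_devices(). A compact recap of that board-file pattern, reusing the Harmony field and device names; the GPIO choices simply mirror the Harmony ones and are placeholders for any other board:

        #include <linux/kernel.h>
        #include <linux/platform_device.h>
        #include <mach/sdhci.h>

        #include "devices.h"            /* tegra_sdhci_device2 */
        #include "gpio-names.h"

        static struct tegra_sdhci_platform_data myboard_sdhci_pdata2 = {
                .cd_gpio        = TEGRA_GPIO_PI5,       /* card detect */
                .wp_gpio        = TEGRA_GPIO_PH1,       /* write protect */
                .power_gpio     = TEGRA_GPIO_PT3,       /* slot power enable */
        };

        static struct platform_device *myboard_devices[] __initdata = {
                &tegra_sdhci_device2,
        };

        static void __init myboard_init(void)
        {
                /* Attach the per-slot data, then register in one call. */
                tegra_sdhci_device2.dev.platform_data = &myboard_sdhci_pdata2;
                platform_add_devices(myboard_devices, ARRAY_SIZE(myboard_devices));
        }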
+1,179 @@ +/* + * Copyright (C) 2010 NVIDIA Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/gpio.h> + +#include <mach/pinmux.h> +#include <mach/pinmux-t2.h> + +#include "gpio-names.h" +#include "board-seaboard.h" + +#define DEFAULT_DRIVE(_name) \ + { \ + .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \ + .hsm = TEGRA_HSM_DISABLE, \ + .schmitt = TEGRA_SCHMITT_ENABLE, \ + .drive = TEGRA_DRIVE_DIV_1, \ + .pull_down = TEGRA_PULL_31, \ + .pull_up = TEGRA_PULL_31, \ + .slew_rising = TEGRA_SLEW_SLOWEST, \ + .slew_falling = TEGRA_SLEW_SLOWEST, \ + } + +static __initdata struct tegra_drive_pingroup_config seaboard_drive_pinmux[] = { + DEFAULT_DRIVE(SDIO1), +}; + +static __initdata struct tegra_pingroup_config seaboard_pinmux[] = { + {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_I2CP, 
TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LCSN, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LDC, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LM1, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LPW0, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LPW2, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LSC1, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSCK, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSDA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD4, 
TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, +}; + + + + +static struct tegra_gpio_table gpio_table[] = { + { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */ + { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */ + { 
.gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc2 pwr */ + { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true }, /* lid switch */ + { .gpio = TEGRA_GPIO_POWERKEY, .enable = true }, /* power key */ +}; + +void __init seaboard_pinmux_init(void) +{ + tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux)); + + tegra_drive_pinmux_config_table(seaboard_drive_pinmux, + ARRAY_SIZE(seaboard_drive_pinmux)); + + tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); +} diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c new file mode 100644 index 000000000000..6ca9e61f6cd0 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2010, 2011 NVIDIA Corporation. + * Copyright (C) 2010, 2011 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/serial_8250.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/io.h> +#include <linux/gpio_keys.h> + +#include <mach/iomap.h> +#include <mach/irqs.h> +#include <mach/sdhci.h> + +#include <asm/mach-types.h> +#include <asm/mach/arch.h> + +#include "board.h" +#include "board-seaboard.h" +#include "clock.h" +#include "devices.h" +#include "gpio-names.h" + +static struct plat_serial8250_port debug_uart_platform_data[] = { + { + /* Memory and IRQ filled in before registration */ + .flags = UPF_BOOT_AUTOCONF, + .iotype = UPIO_MEM, + .regshift = 2, + .uartclk = 216000000, + }, { + .flags = 0, + } +}; + +static struct platform_device debug_uart = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = debug_uart_platform_data, + }, +}; + +static __initdata struct tegra_clk_init_table seaboard_clk_init_table[] = { + /* name parent rate enabled */ + { "uartb", "pll_p", 216000000, true}, + { "uartd", "pll_p", 216000000, true}, + { NULL, NULL, 0, 0}, +}; + +static struct gpio_keys_button seaboard_gpio_keys_buttons[] = { + { + .code = SW_LID, + .gpio = TEGRA_GPIO_LIDSWITCH, + .active_low = 0, + .desc = "Lid", + .type = EV_SW, + .wakeup = 1, + .debounce_interval = 1, + }, + { + .code = KEY_POWER, + .gpio = TEGRA_GPIO_POWERKEY, + .active_low = 1, + .desc = "Power", + .type = EV_KEY, + .wakeup = 1, + }, +}; + +static struct gpio_keys_platform_data seaboard_gpio_keys = { + .buttons = seaboard_gpio_keys_buttons, + .nbuttons = ARRAY_SIZE(seaboard_gpio_keys_buttons), +}; + +static struct platform_device seaboard_gpio_keys_device = { + .name = "gpio-keys", + .id = -1, + .dev = { + .platform_data = &seaboard_gpio_keys, + } +}; + +static struct tegra_sdhci_platform_data sdhci_pdata1 = { + .cd_gpio = -1, + .wp_gpio = -1, + .power_gpio = -1, +}; + +static struct tegra_sdhci_platform_data sdhci_pdata3 = { + .cd_gpio = TEGRA_GPIO_PI5, + .wp_gpio = TEGRA_GPIO_PH1, + .power_gpio = TEGRA_GPIO_PI6, +}; + +static struct tegra_sdhci_platform_data sdhci_pdata4 = { + .cd_gpio = -1, + .wp_gpio = -1, + .power_gpio = -1, + .is_8bit = 1, +}; + +static struct platform_device 
*seaboard_devices[] __initdata = { + &debug_uart, + &tegra_pmu_device, + &tegra_sdhci_device1, + &tegra_sdhci_device3, + &tegra_sdhci_device4, + &seaboard_gpio_keys_device, +}; + +static void __init __tegra_seaboard_init(void) +{ + seaboard_pinmux_init(); + + tegra_clk_init_from_table(seaboard_clk_init_table); + + tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; + tegra_sdhci_device3.dev.platform_data = &sdhci_pdata3; + tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; + + platform_add_devices(seaboard_devices, ARRAY_SIZE(seaboard_devices)); +} + +static void __init tegra_seaboard_init(void) +{ + /* Seaboard uses UARTD for the debug port. */ + debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTD_BASE); + debug_uart_platform_data[0].mapbase = TEGRA_UARTD_BASE; + debug_uart_platform_data[0].irq = INT_UARTD; + + __tegra_seaboard_init(); +} + +static void __init tegra_kaen_init(void) +{ + /* Kaen uses UARTB for the debug port. */ + debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE); + debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE; + debug_uart_platform_data[0].irq = INT_UARTB; + + __tegra_seaboard_init(); +} + +static void __init tegra_wario_init(void) +{ + /* Wario uses UARTB for the debug port. */ + debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE); + debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE; + debug_uart_platform_data[0].irq = INT_UARTB; + + __tegra_seaboard_init(); +} + + +MACHINE_START(SEABOARD, "seaboard") + .boot_params = 0x00000100, + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, + .init_irq = tegra_init_irq, + .timer = &tegra_timer, + .init_machine = tegra_seaboard_init, +MACHINE_END + +MACHINE_START(KAEN, "kaen") + .boot_params = 0x00000100, + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, + .init_irq = tegra_init_irq, + .timer = &tegra_timer, + .init_machine = tegra_kaen_init, +MACHINE_END + +MACHINE_START(WARIO, "wario") + .boot_params = 0x00000100, + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, + .init_irq = tegra_init_irq, + .timer = &tegra_timer, + .init_machine = tegra_wario_init, +MACHINE_END diff --git a/arch/arm/mach-tegra/board-seaboard.h b/arch/arm/mach-tegra/board-seaboard.h new file mode 100644 index 000000000000..a098e3599731 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard.h @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-tegra/board-seaboard.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
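Editor's sketch: Seaboard, Kaen and Wario above share __tegra_seaboard_init() and differ only in which UART carries the debug console. A hypothetical further derivative would follow the same shape; this fragment is assumed to sit in board-seaboard.c next to the functions above, MYDERIVATIVE is not a real machine type, and routing the console to UARTC is only an example:

        static void __init tegra_myderivative_init(void)
        {
                /* This imaginary derivative routes the console to UARTC. */
                debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTC_BASE);
                debug_uart_platform_data[0].mapbase = TEGRA_UARTC_BASE;
                debug_uart_platform_data[0].irq     = INT_UARTC;

                __tegra_seaboard_init();
        }

        MACHINE_START(MYDERIVATIVE, "myderivative")
                .boot_params    = 0x00000100,
                .map_io         = tegra_map_common_io,
                .init_early     = tegra_init_early,
                .init_irq       = tegra_init_irq,
                .timer          = &tegra_timer,
                .init_machine   = tegra_myderivative_init,
        MACHINE_END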
+ * + */ + +#ifndef _MACH_TEGRA_BOARD_SEABOARD_H +#define _MACH_TEGRA_BOARD_SEABOARD_H + +#define TEGRA_GPIO_LIDSWITCH TEGRA_GPIO_PC7 +#define TEGRA_GPIO_USB1 TEGRA_GPIO_PD0 +#define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PV2 +#define TEGRA_GPIO_BACKLIGHT TEGRA_GPIO_PD4 +#define TEGRA_GPIO_LVDS_SHUTDOWN TEGRA_GPIO_PB2 +#define TEGRA_GPIO_BACKLIGHT_PWM TEGRA_GPIO_PU5 +#define TEGRA_GPIO_BACKLIGHT_VDD TEGRA_GPIO_PW0 +#define TEGRA_GPIO_EN_VDD_PNL TEGRA_GPIO_PC6 +#define TEGRA_GPIO_MAGNETOMETER TEGRA_GPIO_PN5 +#define TEGRA_GPIO_ISL29018_IRQ TEGRA_GPIO_PZ2 +#define TEGRA_GPIO_AC_ONLINE TEGRA_GPIO_PV3 + +#define TPS_GPIO_BASE TEGRA_NR_GPIOS + +#define TPS_GPIO_WWAN_PWR (TPS_GPIO_BASE + 2) + +void seaboard_pinmux_init(void); + +#endif diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c new file mode 100644 index 000000000000..6d4fc9f7f1fb --- /dev/null +++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c @@ -0,0 +1,145 @@ +/* + * arch/arm/mach-tegra/board-trimslice-pinmux.c + * + * Copyright (C) 2011 CompuLab, Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <mach/pinmux.h> + +#include "board-trimslice.h" + +static __initdata struct tegra_pingroup_config trimslice_pinmux[] = { + {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_OSC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GMB, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_GMC, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + 
{TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GME, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_GPU, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LCSN, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + 
{TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_SPDI, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPDO, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIA, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIB, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIC, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, + {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, 
TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, + {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, +}; + +void __init trimslice_pinmux_init(void) +{ + tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux)); +} diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c new file mode 100644 index 000000000000..7be7d4acd02f --- /dev/null +++ b/arch/arm/mach-tegra/board-trimslice.c @@ -0,0 +1,106 @@ +/* + * arch/arm/mach-tegra/board-trimslice.c + * + * Copyright (C) 2011 CompuLab, Ltd. + * Author: Mike Rapoport <mike@compulab.co.il> + * + * Based on board-harmony.c + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/serial_8250.h> +#include <linux/io.h> + +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include <asm/setup.h> + +#include <mach/iomap.h> + +#include "board.h" +#include "clock.h" + +#include "board-trimslice.h" + +static struct plat_serial8250_port debug_uart_platform_data[] = { + { + .membase = IO_ADDRESS(TEGRA_UARTA_BASE), + .mapbase = TEGRA_UARTA_BASE, + .irq = INT_UARTA, + .flags = UPF_BOOT_AUTOCONF, + .iotype = UPIO_MEM, + .regshift = 2, + .uartclk = 216000000, + }, { + .flags = 0 + } +}; + +static struct platform_device debug_uart = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = debug_uart_platform_data, + }, +}; + +static struct platform_device *trimslice_devices[] __initdata = { + &debug_uart, +}; + +static void __init tegra_trimslice_fixup(struct machine_desc *desc, + struct tag *tags, char **cmdline, struct meminfo *mi) +{ + mi->nr_banks = 2; + mi->bank[0].start = PHYS_OFFSET; + mi->bank[0].size = 448 * SZ_1M; + mi->bank[1].start = SZ_512M; + mi->bank[1].size = SZ_512M; +} + +static __initdata struct tegra_clk_init_table trimslice_clk_init_table[] = { + /* name parent rate enabled */ + { "uarta", "pll_p", 216000000, true }, + { NULL, NULL, 0, 0}, +}; + +static int __init tegra_trimslice_pci_init(void) +{ + if (!machine_is_trimslice()) + return 0; + + return tegra_pcie_init(true, true); +} +subsys_initcall(tegra_trimslice_pci_init); + +static void __init tegra_trimslice_init(void) +{ + tegra_clk_init_from_table(trimslice_clk_init_table); + + trimslice_pinmux_init(); + + platform_add_devices(trimslice_devices, ARRAY_SIZE(trimslice_devices)); +} + +MACHINE_START(TRIMSLICE, "trimslice") + .boot_params = 0x00000100, + .fixup = tegra_trimslice_fixup, + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, + .init_irq = tegra_init_irq, + .timer = &tegra_timer, + .init_machine = tegra_trimslice_init, +MACHINE_END diff --git 
a/arch/arm/mach-tegra/tegra2_dvfs.h b/arch/arm/mach-tegra/board-trimslice.h index f8c1adba96a6..16ec0f0d3bb1 100644 --- a/arch/arm/mach-tegra/tegra2_dvfs.h +++ b/arch/arm/mach-tegra/board-trimslice.h @@ -1,10 +1,7 @@ /* - * arch/arm/mach-tegra/tegra2_dvfs.h + * arch/arm/mach-tegra/board-trimslice.h * - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Colin Cross <ccross@google.com> + * Copyright (C) 2011 CompuLab, Ltd. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -17,4 +14,9 @@ * */ -extern struct dvfs tegra_dvfs_virtual_cpu_dvfs; +#ifndef _MACH_TEGRA_BOARD_TRIMSLICE_H +#define _MACH_TEGRA_BOARD_TRIMSLICE_H + +void trimslice_pinmux_init(void); + +#endif diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index 0de565ca37c5..1d14df7eb7de 100644 --- a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h @@ -23,7 +23,9 @@ #include <linux/types.h> -void __init tegra_common_init(void); +void tegra_assert_system_reset(char mode, const char *cmd); + +void __init tegra_init_early(void); void __init tegra_map_common_io(void); void __init tegra_init_irq(void); void __init tegra_init_clock(void); diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c index 77948e0f4909..e028320ab423 100644 --- a/arch/arm/mach-tegra/clock.c +++ b/arch/arm/mach-tegra/clock.c @@ -18,238 +18,177 @@ #include <linux/kernel.h> #include <linux/clk.h> -#include <linux/list.h> +#include <linux/clkdev.h> +#include <linux/debugfs.h> +#include <linux/delay.h> #include <linux/init.h> +#include <linux/list.h> #include <linux/module.h> -#include <linux/debugfs.h> -#include <linux/slab.h> +#include <linux/sched.h> #include <linux/seq_file.h> -#include <linux/regulator/consumer.h> -#include <linux/clkdev.h> +#include <linux/slab.h> + +#include <mach/clk.h> -#include "clock.h" #include "board.h" -#include "fuse.h" +#include "clock.h" +/* + * Locking: + * + * Each struct clk has a spinlock. + * + * To avoid AB-BA locking problems, locks must always be traversed from child + * clock to parent clock. For example, when enabling a clock, the clock's lock + * is taken, and then clk_enable is called on the parent, which take's the + * parent clock's lock. There is one exceptions to this ordering: When dumping + * the clock tree through debugfs. In this case, clk_lock_all is called, + * which attemps to iterate through the entire list of clocks and take every + * clock lock. If any call to spin_trylock fails, all locked clocks are + * unlocked, and the process is retried. When all the locks are held, + * the only clock operation that can be called is clk_get_rate_all_locked. + * + * Within a single clock, no clock operation can call another clock operation + * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any + * clock operation can call any other clock operation on any of it's possible + * parents. + * + * An additional mutex, clock_list_lock, is used to protect the list of all + * clocks. 
+ * + * The clock operations must lock internally to protect against + * read-modify-write on registers that are shared by multiple clocks + */ +static DEFINE_MUTEX(clock_list_lock); static LIST_HEAD(clocks); -static DEFINE_SPINLOCK(clock_lock); -static DEFINE_MUTEX(dvfs_lock); - -static int clk_is_dvfs(struct clk *c) -{ - return (c->dvfs != NULL); -}; - -static int dvfs_set_rate(struct dvfs *d, unsigned long rate) -{ - struct dvfs_table *t; - - if (d->table == NULL) - return -ENODEV; - - for (t = d->table; t->rate != 0; t++) { - if (rate <= t->rate) { - if (!d->reg) - return 0; - - return regulator_set_voltage(d->reg, - t->millivolts * 1000, - d->max_millivolts * 1000); - } - } - - return -EINVAL; -} - -static void dvfs_init(struct clk *c) -{ - int process_id; - int i; - struct dvfs_table *table; - - process_id = c->dvfs->cpu ? tegra_core_process_id() : - tegra_cpu_process_id(); - - for (i = 0; i < c->dvfs->process_id_table_length; i++) - if (process_id == c->dvfs->process_id_table[i].process_id) - c->dvfs->table = c->dvfs->process_id_table[i].table; - - if (c->dvfs->table == NULL) { - pr_err("Failed to find dvfs table for clock %s process %d\n", - c->name, process_id); - return; - } - - c->dvfs->max_millivolts = 0; - for (table = c->dvfs->table; table->rate != 0; table++) - if (c->dvfs->max_millivolts < table->millivolts) - c->dvfs->max_millivolts = table->millivolts; - - c->dvfs->reg = regulator_get(NULL, c->dvfs->reg_id); - - if (IS_ERR(c->dvfs->reg)) { - pr_err("Failed to get regulator %s for clock %s\n", - c->dvfs->reg_id, c->name); - c->dvfs->reg = NULL; - return; - } - - if (c->refcnt > 0) - dvfs_set_rate(c->dvfs, c->rate); -} - struct clk *tegra_get_clock_by_name(const char *name) { struct clk *c; struct clk *ret = NULL; - unsigned long flags; - spin_lock_irqsave(&clock_lock, flags); + mutex_lock(&clock_list_lock); list_for_each_entry(c, &clocks, node) { if (strcmp(c->name, name) == 0) { ret = c; break; } } - spin_unlock_irqrestore(&clock_lock, flags); + mutex_unlock(&clock_list_lock); return ret; } -static void clk_recalculate_rate(struct clk *c) +/* Must be called with c->spinlock held */ +static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p) { u64 rate; - if (!c->parent) - return; - - rate = c->parent->rate; + rate = clk_get_rate(p); if (c->mul != 0 && c->div != 0) { - rate = rate * c->mul; + rate *= c->mul; + rate += c->div - 1; /* round up */ do_div(rate, c->div); } - if (rate > c->max_rate) - pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n", - c->name, rate, c->max_rate); - - c->rate = rate; + return rate; } -int clk_reparent(struct clk *c, struct clk *parent) +/* Must be called with c->spinlock held */ +unsigned long clk_get_rate_locked(struct clk *c) { - pr_debug("%s: %s\n", __func__, c->name); - c->parent = parent; - list_del(&c->sibling); - list_add_tail(&c->sibling, &parent->children); - return 0; -} + unsigned long rate; -static void propagate_rate(struct clk *c) -{ - struct clk *clkp; - pr_debug("%s: %s\n", __func__, c->name); - list_for_each_entry(clkp, &c->children, sibling) { - pr_debug(" %s\n", clkp->name); - clk_recalculate_rate(clkp); - propagate_rate(clkp); - } + if (c->parent) + rate = clk_predict_rate_from_parent(c, c->parent); + else + rate = c->rate; + + return rate; } -void clk_init(struct clk *c) +unsigned long clk_get_rate(struct clk *c) { unsigned long flags; + unsigned long rate; + + spin_lock_irqsave(&c->spinlock, flags); - pr_debug("%s: %s\n", __func__, c->name); + rate = clk_get_rate_locked(c); - 
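Editor's sketch: clk_predict_rate_from_parent() above rounds up rather than truncating by adding (div - 1) before the divide. A standalone arithmetic illustration of that rounding; the numbers are made up and the helper below is not kernel code:

        #include <stdint.h>
        #include <stdio.h>

        /* Adding (div - 1) before the divide rounds up instead of truncating. */
        static uint64_t predict_rate(uint64_t parent_rate, uint32_t mul, uint32_t div)
        {
                uint64_t rate = parent_rate * mul;

                rate += div - 1;        /* round up, as the kernel code does */
                return rate / div;
        }

        int main(void)
        {
                /* 216 MHz parent divided by 7: truncation would give 30857142 Hz */
                printf("%llu\n", (unsigned long long)predict_rate(216000000ULL, 1, 7));
                /* prints 30857143: the extra (div - 1) bumps the result to the
                 * next integer whenever the division has a remainder */
                return 0;
        }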
spin_lock_irqsave(&clock_lock, flags); + spin_unlock_irqrestore(&c->spinlock, flags); - INIT_LIST_HEAD(&c->children); - INIT_LIST_HEAD(&c->sibling); + return rate; +} +EXPORT_SYMBOL(clk_get_rate); + +int clk_reparent(struct clk *c, struct clk *parent) +{ + c->parent = parent; + return 0; +} + +void clk_init(struct clk *c) +{ + spin_lock_init(&c->spinlock); if (c->ops && c->ops->init) c->ops->init(c); - clk_recalculate_rate(c); + if (!c->ops || !c->ops->enable) { + c->refcnt++; + c->set = true; + if (c->parent) + c->state = c->parent->state; + else + c->state = ON; + } + mutex_lock(&clock_list_lock); list_add(&c->node, &clocks); - - if (c->parent) - list_add_tail(&c->sibling, &c->parent->children); - - spin_unlock_irqrestore(&clock_lock, flags); + mutex_unlock(&clock_list_lock); } -int clk_enable_locked(struct clk *c) +int clk_enable(struct clk *c) { - int ret; - pr_debug("%s: %s\n", __func__, c->name); + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&c->spinlock, flags); + if (c->refcnt == 0) { if (c->parent) { - ret = clk_enable_locked(c->parent); + ret = clk_enable(c->parent); if (ret) - return ret; + goto out; } if (c->ops && c->ops->enable) { ret = c->ops->enable(c); if (ret) { if (c->parent) - clk_disable_locked(c->parent); - return ret; + clk_disable(c->parent); + goto out; } c->state = ON; -#ifdef CONFIG_DEBUG_FS - c->set = 1; -#endif + c->set = true; } } c->refcnt++; - - return 0; -} - -int clk_enable_cansleep(struct clk *c) -{ - int ret; - unsigned long flags; - - mutex_lock(&dvfs_lock); - - if (clk_is_dvfs(c) && c->refcnt > 0) - dvfs_set_rate(c->dvfs, c->rate); - - spin_lock_irqsave(&clock_lock, flags); - ret = clk_enable_locked(c); - spin_unlock_irqrestore(&clock_lock, flags); - - mutex_unlock(&dvfs_lock); - +out: + spin_unlock_irqrestore(&c->spinlock, flags); return ret; } -EXPORT_SYMBOL(clk_enable_cansleep); +EXPORT_SYMBOL(clk_enable); -int clk_enable(struct clk *c) +void clk_disable(struct clk *c) { - int ret; unsigned long flags; - if (clk_is_dvfs(c)) - BUG(); - - spin_lock_irqsave(&clock_lock, flags); - ret = clk_enable_locked(c); - spin_unlock_irqrestore(&clock_lock, flags); - - return ret; -} -EXPORT_SYMBOL(clk_enable); + spin_lock_irqsave(&c->spinlock, flags); -void clk_disable_locked(struct clk *c) -{ - pr_debug("%s: %s\n", __func__, c->name); if (c->refcnt == 0) { WARN(1, "Attempting to disable clock %s with refcnt 0", c->name); + spin_unlock_irqrestore(&c->spinlock, flags); return; } if (c->refcnt == 1) { @@ -257,71 +196,39 @@ void clk_disable_locked(struct clk *c) c->ops->disable(c); if (c->parent) - clk_disable_locked(c->parent); + clk_disable(c->parent); c->state = OFF; } c->refcnt--; -} - -void clk_disable_cansleep(struct clk *c) -{ - unsigned long flags; - - mutex_lock(&dvfs_lock); - - spin_lock_irqsave(&clock_lock, flags); - clk_disable_locked(c); - spin_unlock_irqrestore(&clock_lock, flags); - if (clk_is_dvfs(c) && c->refcnt == 0) - dvfs_set_rate(c->dvfs, c->rate); - - mutex_unlock(&dvfs_lock); -} -EXPORT_SYMBOL(clk_disable_cansleep); - -void clk_disable(struct clk *c) -{ - unsigned long flags; - - if (clk_is_dvfs(c)) - BUG(); - - spin_lock_irqsave(&clock_lock, flags); - clk_disable_locked(c); - spin_unlock_irqrestore(&clock_lock, flags); + spin_unlock_irqrestore(&c->spinlock, flags); } EXPORT_SYMBOL(clk_disable); -int clk_set_parent_locked(struct clk *c, struct clk *parent) +int clk_set_parent(struct clk *c, struct clk *parent) { int ret; + unsigned long flags; + unsigned long new_rate; + unsigned long old_rate; - pr_debug("%s: %s\n", __func__, 
c->name); + spin_lock_irqsave(&c->spinlock, flags); - if (!c->ops || !c->ops->set_parent) - return -ENOSYS; + if (!c->ops || !c->ops->set_parent) { + ret = -ENOSYS; + goto out; + } - ret = c->ops->set_parent(c, parent); + new_rate = clk_predict_rate_from_parent(c, parent); + old_rate = clk_get_rate_locked(c); + ret = c->ops->set_parent(c, parent); if (ret) - return ret; - - clk_recalculate_rate(c); - - propagate_rate(c); - - return 0; -} + goto out; -int clk_set_parent(struct clk *c, struct clk *parent) -{ - int ret; - unsigned long flags; - spin_lock_irqsave(&clock_lock, flags); - ret = clk_set_parent_locked(c, parent); - spin_unlock_irqrestore(&clock_lock, flags); +out: + spin_unlock_irqrestore(&c->spinlock, flags); return ret; } EXPORT_SYMBOL(clk_set_parent); @@ -334,100 +241,86 @@ EXPORT_SYMBOL(clk_get_parent); int clk_set_rate_locked(struct clk *c, unsigned long rate) { - int ret; - - if (rate > c->max_rate) - rate = c->max_rate; + long new_rate; if (!c->ops || !c->ops->set_rate) return -ENOSYS; - ret = c->ops->set_rate(c, rate); - - if (ret) - return ret; - - clk_recalculate_rate(c); - - propagate_rate(c); - - return 0; -} - -int clk_set_rate_cansleep(struct clk *c, unsigned long rate) -{ - int ret = 0; - unsigned long flags; - - pr_debug("%s: %s\n", __func__, c->name); - - mutex_lock(&dvfs_lock); - - if (rate > c->rate) - ret = dvfs_set_rate(c->dvfs, rate); - if (ret) - goto out; + if (rate > c->max_rate) + rate = c->max_rate; - spin_lock_irqsave(&clock_lock, flags); - ret = clk_set_rate_locked(c, rate); - spin_unlock_irqrestore(&clock_lock, flags); + if (c->ops && c->ops->round_rate) { + new_rate = c->ops->round_rate(c, rate); - if (ret) - goto out; + if (new_rate < 0) + return new_rate; - ret = dvfs_set_rate(c->dvfs, rate); + rate = new_rate; + } -out: - mutex_unlock(&dvfs_lock); - return ret; + return c->ops->set_rate(c, rate); } -EXPORT_SYMBOL(clk_set_rate_cansleep); int clk_set_rate(struct clk *c, unsigned long rate) { - int ret = 0; + int ret; unsigned long flags; - pr_debug("%s: %s\n", __func__, c->name); - - if (clk_is_dvfs(c)) - BUG(); + spin_lock_irqsave(&c->spinlock, flags); - spin_lock_irqsave(&clock_lock, flags); ret = clk_set_rate_locked(c, rate); - spin_unlock_irqrestore(&clock_lock, flags); + + spin_unlock_irqrestore(&c->spinlock, flags); return ret; } EXPORT_SYMBOL(clk_set_rate); -unsigned long clk_get_rate(struct clk *c) -{ - unsigned long flags; - unsigned long ret; - - spin_lock_irqsave(&clock_lock, flags); - pr_debug("%s: %s\n", __func__, c->name); +/* Must be called with clocks lock and all indvidual clock locks held */ +unsigned long clk_get_rate_all_locked(struct clk *c) +{ + u64 rate; + int mul = 1; + int div = 1; + struct clk *p = c; + + while (p) { + c = p; + if (c->mul != 0 && c->div != 0) { + mul *= c->mul; + div *= c->div; + } + p = c->parent; + } - ret = c->rate; + rate = c->rate; + rate *= mul; + do_div(rate, div); - spin_unlock_irqrestore(&clock_lock, flags); - return ret; + return rate; } -EXPORT_SYMBOL(clk_get_rate); long clk_round_rate(struct clk *c, unsigned long rate) { - pr_debug("%s: %s\n", __func__, c->name); + unsigned long flags; + long ret; - if (!c->ops || !c->ops->round_rate) - return -ENOSYS; + spin_lock_irqsave(&c->spinlock, flags); + + if (!c->ops || !c->ops->round_rate) { + ret = -ENOSYS; + goto out; + } if (rate > c->max_rate) rate = c->max_rate; - return c->ops->round_rate(c, rate); + ret = c->ops->round_rate(c, rate); + +out: + spin_unlock_irqrestore(&c->spinlock, flags); + return ret; } EXPORT_SYMBOL(clk_round_rate); @@ -509,31 
+402,90 @@ void __init tegra_init_clock(void) tegra2_init_clocks(); } -int __init tegra_init_dvfs(void) +/* + * The SDMMC controllers have extra bits in the clock source register that + * adjust the delay between the clock and data to compenstate for delays + * on the PCB. + */ +void tegra_sdmmc_tap_delay(struct clk *c, int delay) { - struct clk *c, *safe; + unsigned long flags; + + spin_lock_irqsave(&c->spinlock, flags); + tegra2_sdmmc_tap_delay(c, delay); + spin_unlock_irqrestore(&c->spinlock, flags); +} - mutex_lock(&dvfs_lock); +#ifdef CONFIG_DEBUG_FS - list_for_each_entry_safe(c, safe, &clocks, node) - if (c->dvfs) - dvfs_init(c); +static int __clk_lock_all_spinlocks(void) +{ + struct clk *c; - mutex_unlock(&dvfs_lock); + list_for_each_entry(c, &clocks, node) + if (!spin_trylock(&c->spinlock)) + goto unlock_spinlocks; return 0; + +unlock_spinlocks: + list_for_each_entry_continue_reverse(c, &clocks, node) + spin_unlock(&c->spinlock); + + return -EAGAIN; } -late_initcall(tegra_init_dvfs); +static void __clk_unlock_all_spinlocks(void) +{ + struct clk *c; + + list_for_each_entry_reverse(c, &clocks, node) + spin_unlock(&c->spinlock); +} + +/* + * This function retries until it can take all locks, and may take + * an arbitrarily long time to complete. + * Must be called with irqs enabled, returns with irqs disabled + * Must be called with clock_list_lock held + */ +static void clk_lock_all(void) +{ + int ret; +retry: + local_irq_disable(); + + ret = __clk_lock_all_spinlocks(); + if (ret) + goto failed_spinlocks; + + /* All locks taken successfully, return */ + return; + +failed_spinlocks: + local_irq_enable(); + yield(); + goto retry; +} + +/* + * Unlocks all clocks after a clk_lock_all + * Must be called with irqs disabled, returns with irqs enabled + * Must be called with clock_list_lock held + */ +static void clk_unlock_all(void) +{ + __clk_unlock_all_spinlocks(); + + local_irq_enable(); +} -#ifdef CONFIG_DEBUG_FS static struct dentry *clk_debugfs_root; static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) { struct clk *child; - struct clk *safe; const char *state = "uninit"; char div[8] = {0}; @@ -564,8 +516,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) c->rate > c->max_rate ? '!' : ' ', !c->set ? 
'*' : ' ', 30 - level * 3, c->name, - state, c->refcnt, div, c->rate); - list_for_each_entry_safe(child, safe, &c->children, sibling) { + state, c->refcnt, div, clk_get_rate_all_locked(c)); + + list_for_each_entry(child, &clocks, node) { + if (child->parent != c) + continue; + clock_tree_show_one(s, child, level + 1); } } @@ -573,14 +529,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) static int clock_tree_show(struct seq_file *s, void *data) { struct clk *c; - unsigned long flags; seq_printf(s, " clock state ref div rate\n"); seq_printf(s, "--------------------------------------------------------------\n"); - spin_lock_irqsave(&clock_lock, flags); + + mutex_lock(&clock_list_lock); + + clk_lock_all(); + list_for_each_entry(c, &clocks, node) if (c->parent == NULL) clock_tree_show_one(s, c, 0); - spin_unlock_irqrestore(&clock_lock, flags); + + clk_unlock_all(); + + mutex_unlock(&clock_list_lock); return 0; } diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h index 083a4cfc6cf0..688316abc64e 100644 --- a/arch/arm/mach-tegra/clock.h +++ b/arch/arm/mach-tegra/clock.h @@ -20,8 +20,9 @@ #ifndef __MACH_TEGRA_CLOCK_H #define __MACH_TEGRA_CLOCK_H -#include <linux/list.h> #include <linux/clkdev.h> +#include <linux/list.h> +#include <linux/spinlock.h> #define DIV_BUS (1 << 0) #define DIV_U71 (1 << 1) @@ -41,36 +42,13 @@ #define ENABLE_ON_INIT (1 << 28) struct clk; -struct regulator; - -struct dvfs_table { - unsigned long rate; - int millivolts; -}; - -struct dvfs_process_id_table { - int process_id; - struct dvfs_table *table; -}; - - -struct dvfs { - struct regulator *reg; - struct dvfs_table *table; - int max_millivolts; - - int process_id_table_length; - const char *reg_id; - bool cpu; - struct dvfs_process_id_table process_id_table[]; -}; struct clk_mux_sel { struct clk *input; u32 value; }; -struct clk_pll_table { +struct clk_pll_freq_table { unsigned long input_rate; unsigned long output_rate; u16 n; @@ -86,6 +64,7 @@ struct clk_ops { int (*set_parent)(struct clk *, struct clk *); int (*set_rate)(struct clk *, unsigned long); long (*round_rate)(struct clk *, unsigned long); + void (*reset)(struct clk *, bool); }; enum clk_state { @@ -96,55 +75,64 @@ enum clk_state { struct clk { /* node for master clocks list */ - struct list_head node; - struct list_head children; /* list of children */ - struct list_head sibling; /* node for children */ -#ifdef CONFIG_DEBUG_FS - struct dentry *dent; - struct dentry *parent_dent; -#endif - struct clk_ops *ops; - struct clk *parent; - struct clk_lookup lookup; - unsigned long rate; - unsigned long max_rate; - u32 flags; - u32 refcnt; - const char *name; - u32 reg; - u32 reg_shift; - unsigned int clk_num; - enum clk_state state; + struct list_head node; /* node for list of all clocks */ + struct clk_lookup lookup; + #ifdef CONFIG_DEBUG_FS - bool set; + struct dentry *dent; #endif + bool set; + struct clk_ops *ops; + unsigned long rate; + unsigned long max_rate; + unsigned long min_rate; + u32 flags; + const char *name; + + u32 refcnt; + enum clk_state state; + struct clk *parent; + u32 div; + u32 mul; - /* PLL */ - unsigned long input_min; - unsigned long input_max; - unsigned long cf_min; - unsigned long cf_max; - unsigned long vco_min; - unsigned long vco_max; - const struct clk_pll_table *pll_table; - - /* DIV */ - u32 div; - u32 mul; - - /* MUX */ const struct clk_mux_sel *inputs; - u32 sel; - u32 reg_mask; - - /* Virtual cpu clock */ - struct clk *main; - struct clk *backup; + u32 reg; + u32 reg_shift; 
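	/*
	 * Illustrative note, not part of this patch: the per-type fields that
	 * follow are grouped into a union named "u", selected by clock type.
	 * Code that previously read c->clk_num or c->pll_table would now use,
	 * for example:
	 *
	 *	unsigned int num = c->u.periph.clk_num;
	 *	const struct clk_pll_freq_table *tbl = c->u.pll.freq_table;
	 *
	 * Field names are taken from the union definition below.
	 */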
- struct dvfs *dvfs; + struct list_head shared_bus_list; + + union { + struct { + unsigned int clk_num; + } periph; + struct { + unsigned long input_min; + unsigned long input_max; + unsigned long cf_min; + unsigned long cf_max; + unsigned long vco_min; + unsigned long vco_max; + const struct clk_pll_freq_table *freq_table; + int lock_delay; + } pll; + struct { + u32 sel; + u32 reg_mask; + } mux; + struct { + struct clk *main; + struct clk *backup; + } cpu; + struct { + struct list_head node; + bool enabled; + unsigned long rate; + } shared_bus_user; + } u; + + spinlock_t spinlock; }; - struct clk_duplicate { const char *name; struct clk_lookup lookup; @@ -163,11 +151,10 @@ void tegra2_periph_reset_assert(struct clk *c); void clk_init(struct clk *clk); struct clk *tegra_get_clock_by_name(const char *name); unsigned long clk_measure_input_freq(void); -void clk_disable_locked(struct clk *c); -int clk_enable_locked(struct clk *c); -int clk_set_parent_locked(struct clk *c, struct clk *parent); -int clk_set_rate_locked(struct clk *c, unsigned long rate); int clk_reparent(struct clk *c, struct clk *parent); void tegra_clk_init_from_table(struct tegra_clk_init_table *table); +unsigned long clk_get_rate_locked(struct clk *c); +int clk_set_rate_locked(struct clk *c, unsigned long rate); +void tegra2_sdmmc_tap_delay(struct clk *c, int delay); #endif diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index 7c91e2b9d643..d5e3f89b05af 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c @@ -25,12 +25,25 @@ #include <asm/hardware/cache-l2x0.h> #include <mach/iomap.h> -#include <mach/dma.h> +#include <mach/system.h> #include "board.h" #include "clock.h" #include "fuse.h" +void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset; + +void tegra_assert_system_reset(char mode, const char *cmd) +{ + void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04); + u32 reg; + + /* use *_related to avoid spinlock since caches are off */ + reg = readl_relaxed(reset); + reg |= 0x04; + writel_relaxed(reg, reset); +} + static __initdata struct tegra_clk_init_table common_clk_init_table[] = { /* name parent rate enabled */ { "clk_m", NULL, 0, true }, @@ -42,6 +55,9 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = { { "sclk", "pll_p_out4", 108000000, true }, { "hclk", "sclk", 108000000, true }, { "pclk", "hclk", 54000000, true }, + { "csite", NULL, 0, true }, + { "emc", NULL, 0, true }, + { "cpu", NULL, 0, true }, { NULL, NULL, 0, 0}, }; @@ -50,21 +66,18 @@ void __init tegra_init_cache(void) #ifdef CONFIG_CACHE_L2X0 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; - writel(0x331, p + L2X0_TAG_LATENCY_CTRL); - writel(0x441, p + L2X0_DATA_LATENCY_CTRL); + writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL); + writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL); l2x0_init(p, 0x6C080001, 0x8200c3fe); #endif } -void __init tegra_common_init(void) +void __init tegra_init_early(void) { tegra_init_fuse(); tegra_init_clock(); tegra_clk_init_from_table(common_clk_init_table); tegra_init_cache(); -#ifdef CONFIG_TEGRA_SYSTEM_DMA - tegra_dma_init(); -#endif } diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index fea5719c7072..0e1016a827ac 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -28,6 +28,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/suspend.h> #include <asm/system.h> @@ -36,21 +37,25 @@ /* Frequency table index must 
be sequential starting at 0 */ static struct cpufreq_frequency_table freq_table[] = { - { 0, 312000 }, - { 1, 456000 }, - { 2, 608000 }, - { 3, 760000 }, - { 4, 816000 }, - { 5, 912000 }, - { 6, 1000000 }, - { 7, CPUFREQ_TABLE_END }, + { 0, 216000 }, + { 1, 312000 }, + { 2, 456000 }, + { 3, 608000 }, + { 4, 760000 }, + { 5, 816000 }, + { 6, 912000 }, + { 7, 1000000 }, + { 8, CPUFREQ_TABLE_END }, }; #define NUM_CPUS 2 static struct clk *cpu_clk; +static struct clk *emc_clk; static unsigned long target_cpu_speed[NUM_CPUS]; +static DEFINE_MUTEX(tegra_cpu_lock); +static bool is_suspended; int tegra_verify_speed(struct cpufreq_policy *policy) { @@ -68,22 +73,28 @@ unsigned int tegra_getspeed(unsigned int cpu) return rate; } -static int tegra_update_cpu_speed(void) +static int tegra_update_cpu_speed(unsigned long rate) { - int i; - unsigned long rate = 0; int ret = 0; struct cpufreq_freqs freqs; - for_each_online_cpu(i) - rate = max(rate, target_cpu_speed[i]); - freqs.old = tegra_getspeed(0); freqs.new = rate; if (freqs.old == freqs.new) return ret; + /* + * Vote on memory bus frequency based on cpu frequency + * This sets the minimum frequency, display or avp may request higher + */ + if (rate >= 816000) + clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ + else if (rate >= 456000) + clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ + else + clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ + for_each_online_cpu(freqs.cpu) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -92,7 +103,7 @@ static int tegra_update_cpu_speed(void) freqs.old, freqs.new); #endif - ret = clk_set_rate_cansleep(cpu_clk, freqs.new * 1000); + ret = clk_set_rate(cpu_clk, freqs.new * 1000); if (ret) { pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", freqs.new); @@ -105,12 +116,30 @@ static int tegra_update_cpu_speed(void) return 0; } +static unsigned long tegra_cpu_highest_speed(void) +{ + unsigned long rate = 0; + int i; + + for_each_online_cpu(i) + rate = max(rate, target_cpu_speed[i]); + return rate; +} + static int tegra_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { int idx; unsigned int freq; + int ret = 0; + + mutex_lock(&tegra_cpu_lock); + + if (is_suspended) { + ret = -EBUSY; + goto out; + } cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &idx); @@ -119,9 +148,34 @@ static int tegra_target(struct cpufreq_policy *policy, target_cpu_speed[policy->cpu] = freq; - return tegra_update_cpu_speed(); + ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); + +out: + mutex_unlock(&tegra_cpu_lock); + return ret; } +static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, + void *dummy) +{ + mutex_lock(&tegra_cpu_lock); + if (event == PM_SUSPEND_PREPARE) { + is_suspended = true; + pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", + freq_table[0].frequency); + tegra_update_cpu_speed(freq_table[0].frequency); + } else if (event == PM_POST_SUSPEND) { + is_suspended = false; + } + mutex_unlock(&tegra_cpu_lock); + + return NOTIFY_OK; +} + +static struct notifier_block tegra_cpu_pm_notifier = { + .notifier_call = tegra_pm_notify, +}; + static int tegra_cpu_init(struct cpufreq_policy *policy) { if (policy->cpu >= NUM_CPUS) @@ -131,6 +185,15 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); + emc_clk = clk_get_sys("cpu", "emc"); + if (IS_ERR(emc_clk)) { + clk_put(cpu_clk); + return PTR_ERR(emc_clk); + } + + clk_enable(emc_clk); + 
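	/*
	 * Illustrative sketch, not part of this patch: the "cpu" EMC clock
	 * user acquired above is the handle tegra_update_cpu_speed() uses to
	 * vote a minimum memory bus rate for a given CPU rate.  The mapping
	 * applied by that function is, in effect:
	 *
	 *	static unsigned long emc_rate_for_cpu(unsigned long cpu_khz)
	 *	{
	 *		if (cpu_khz >= 816000)
	 *			return 600000000;
	 *		if (cpu_khz >= 456000)
	 *			return 300000000;
	 *		return 100000000;
	 *	}
	 *
	 * The helper name is hypothetical; the thresholds and rates mirror
	 * the clk_set_rate(emc_clk, ...) calls in tegra_update_cpu_speed().
	 */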
clk_enable(cpu_clk); + cpufreq_frequency_table_cpuinfo(policy, freq_table); cpufreq_frequency_table_get_attr(freq_table, policy->cpu); policy->cur = tegra_getspeed(policy->cpu); @@ -142,12 +205,17 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->related_cpus, cpu_possible_mask); + if (policy->cpu == 0) + register_pm_notifier(&tegra_cpu_pm_notifier); + return 0; } static int tegra_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_cpuinfo(policy, freq_table); + clk_disable(emc_clk); + clk_put(emc_clk); clk_put(cpu_clk); return 0; } diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c new file mode 100644 index 000000000000..682e6d33108c --- /dev/null +++ b/arch/arm/mach-tegra/devices.c @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2010,2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * Erik Gilling <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include <linux/resource.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/serial_8250.h> +#include <asm/pmu.h> +#include <mach/irqs.h> +#include <mach/iomap.h> +#include <mach/dma.h> + +static struct resource i2c_resource1[] = { + [0] = { + .start = INT_I2C, + .end = INT_I2C, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_I2C_BASE, + .end = TEGRA_I2C_BASE + TEGRA_I2C_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource i2c_resource2[] = { + [0] = { + .start = INT_I2C2, + .end = INT_I2C2, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_I2C2_BASE, + .end = TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource i2c_resource3[] = { + [0] = { + .start = INT_I2C3, + .end = INT_I2C3, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_I2C3_BASE, + .end = TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource i2c_resource4[] = { + [0] = { + .start = INT_DVC, + .end = INT_DVC, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_DVC_BASE, + .end = TEGRA_DVC_BASE + TEGRA_DVC_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device tegra_i2c_device1 = { + .name = "tegra-i2c", + .id = 0, + .resource = i2c_resource1, + .num_resources = ARRAY_SIZE(i2c_resource1), + .dev = { + .platform_data = 0, + }, +}; + +struct platform_device tegra_i2c_device2 = { + .name = "tegra-i2c", + .id = 1, + .resource = i2c_resource2, + .num_resources = ARRAY_SIZE(i2c_resource2), + .dev = { + .platform_data = 0, + }, +}; + +struct platform_device tegra_i2c_device3 = { + .name = "tegra-i2c", + .id = 2, + .resource = i2c_resource3, + .num_resources = ARRAY_SIZE(i2c_resource3), + .dev = { + .platform_data = 0, + }, +}; + +struct platform_device tegra_i2c_device4 = { + .name = "tegra-i2c", + .id = 3, + .resource = i2c_resource4, + .num_resources = ARRAY_SIZE(i2c_resource4), + .dev = { + .platform_data = 0, + }, +}; + +static struct resource spi_resource1[] = { + [0] = { + .start = 
INT_S_LINK1, + .end = INT_S_LINK1, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SPI1_BASE, + .end = TEGRA_SPI1_BASE + TEGRA_SPI1_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource spi_resource2[] = { + [0] = { + .start = INT_SPI_2, + .end = INT_SPI_2, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SPI2_BASE, + .end = TEGRA_SPI2_BASE + TEGRA_SPI2_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource spi_resource3[] = { + [0] = { + .start = INT_SPI_3, + .end = INT_SPI_3, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SPI3_BASE, + .end = TEGRA_SPI3_BASE + TEGRA_SPI3_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource spi_resource4[] = { + [0] = { + .start = INT_SPI_4, + .end = INT_SPI_4, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SPI4_BASE, + .end = TEGRA_SPI4_BASE + TEGRA_SPI4_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device tegra_spi_device1 = { + .name = "spi_tegra", + .id = 0, + .resource = spi_resource1, + .num_resources = ARRAY_SIZE(spi_resource1), + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device tegra_spi_device2 = { + .name = "spi_tegra", + .id = 1, + .resource = spi_resource2, + .num_resources = ARRAY_SIZE(spi_resource2), + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device tegra_spi_device3 = { + .name = "spi_tegra", + .id = 2, + .resource = spi_resource3, + .num_resources = ARRAY_SIZE(spi_resource3), + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device tegra_spi_device4 = { + .name = "spi_tegra", + .id = 3, + .resource = spi_resource4, + .num_resources = ARRAY_SIZE(spi_resource4), + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + + +static struct resource sdhci_resource1[] = { + [0] = { + .start = INT_SDMMC1, + .end = INT_SDMMC1, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SDMMC1_BASE, + .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource sdhci_resource2[] = { + [0] = { + .start = INT_SDMMC2, + .end = INT_SDMMC2, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SDMMC2_BASE, + .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource sdhci_resource3[] = { + [0] = { + .start = INT_SDMMC3, + .end = INT_SDMMC3, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SDMMC3_BASE, + .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource sdhci_resource4[] = { + [0] = { + .start = INT_SDMMC4, + .end = INT_SDMMC4, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = TEGRA_SDMMC4_BASE, + .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1, + .flags = IORESOURCE_MEM, + }, +}; + +/* board files should fill in platform_data register the devices themselvs. 
+ * See board-harmony.c for an example + */ +struct platform_device tegra_sdhci_device1 = { + .name = "sdhci-tegra", + .id = 0, + .resource = sdhci_resource1, + .num_resources = ARRAY_SIZE(sdhci_resource1), +}; + +struct platform_device tegra_sdhci_device2 = { + .name = "sdhci-tegra", + .id = 1, + .resource = sdhci_resource2, + .num_resources = ARRAY_SIZE(sdhci_resource2), +}; + +struct platform_device tegra_sdhci_device3 = { + .name = "sdhci-tegra", + .id = 2, + .resource = sdhci_resource3, + .num_resources = ARRAY_SIZE(sdhci_resource3), +}; + +struct platform_device tegra_sdhci_device4 = { + .name = "sdhci-tegra", + .id = 3, + .resource = sdhci_resource4, + .num_resources = ARRAY_SIZE(sdhci_resource4), +}; + +static struct resource tegra_usb1_resources[] = { + [0] = { + .start = TEGRA_USB_BASE, + .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_USB, + .end = INT_USB, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_usb2_resources[] = { + [0] = { + .start = TEGRA_USB2_BASE, + .end = TEGRA_USB2_BASE + TEGRA_USB2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_USB2, + .end = INT_USB2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_usb3_resources[] = { + [0] = { + .start = TEGRA_USB3_BASE, + .end = TEGRA_USB3_BASE + TEGRA_USB3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_USB3, + .end = INT_USB3, + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 tegra_ehci_dmamask = DMA_BIT_MASK(32); + +struct platform_device tegra_ehci1_device = { + .name = "tegra-ehci", + .id = 0, + .dev = { + .dma_mask = &tegra_ehci_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .resource = tegra_usb1_resources, + .num_resources = ARRAY_SIZE(tegra_usb1_resources), +}; + +struct platform_device tegra_ehci2_device = { + .name = "tegra-ehci", + .id = 1, + .dev = { + .dma_mask = &tegra_ehci_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .resource = tegra_usb2_resources, + .num_resources = ARRAY_SIZE(tegra_usb2_resources), +}; + +struct platform_device tegra_ehci3_device = { + .name = "tegra-ehci", + .id = 2, + .dev = { + .dma_mask = &tegra_ehci_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .resource = tegra_usb3_resources, + .num_resources = ARRAY_SIZE(tegra_usb3_resources), +}; + +static struct resource tegra_pmu_resources[] = { + [0] = { + .start = INT_CPU0_PMU_INTR, + .end = INT_CPU0_PMU_INTR, + .flags = IORESOURCE_IRQ, + }, + [1] = { + .start = INT_CPU1_PMU_INTR, + .end = INT_CPU1_PMU_INTR, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device tegra_pmu_device = { + .name = "arm-pmu", + .id = ARM_PMU_DEVICE_CPU, + .num_resources = ARRAY_SIZE(tegra_pmu_resources), + .resource = tegra_pmu_resources, +}; + +static struct resource tegra_uarta_resources[] = { + [0] = { + .start = TEGRA_UARTA_BASE, + .end = TEGRA_UARTA_BASE + TEGRA_UARTA_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_UARTA, + .end = INT_UARTA, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_uartb_resources[] = { + [0] = { + .start = TEGRA_UARTB_BASE, + .end = TEGRA_UARTB_BASE + TEGRA_UARTB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_UARTB, + .end = INT_UARTB, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_uartc_resources[] = { + [0] = { + .start = TEGRA_UARTC_BASE, + .end = TEGRA_UARTC_BASE + TEGRA_UARTC_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_UARTC, + .end = INT_UARTC, + .flags = 
IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_uartd_resources[] = { + [0] = { + .start = TEGRA_UARTD_BASE, + .end = TEGRA_UARTD_BASE + TEGRA_UARTD_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_UARTD, + .end = INT_UARTD, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource tegra_uarte_resources[] = { + [0] = { + .start = TEGRA_UARTE_BASE, + .end = TEGRA_UARTE_BASE + TEGRA_UARTE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = INT_UARTE, + .end = INT_UARTE, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device tegra_uarta_device = { + .name = "tegra_uart", + .id = 0, + .num_resources = ARRAY_SIZE(tegra_uarta_resources), + .resource = tegra_uarta_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct platform_device tegra_uartb_device = { + .name = "tegra_uart", + .id = 1, + .num_resources = ARRAY_SIZE(tegra_uartb_resources), + .resource = tegra_uartb_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct platform_device tegra_uartc_device = { + .name = "tegra_uart", + .id = 2, + .num_resources = ARRAY_SIZE(tegra_uartc_resources), + .resource = tegra_uartc_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct platform_device tegra_uartd_device = { + .name = "tegra_uart", + .id = 3, + .num_resources = ARRAY_SIZE(tegra_uartd_resources), + .resource = tegra_uartd_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct platform_device tegra_uarte_device = { + .name = "tegra_uart", + .id = 4, + .num_resources = ARRAY_SIZE(tegra_uarte_resources), + .resource = tegra_uarte_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h new file mode 100644 index 000000000000..888810c37ee9 --- /dev/null +++ b/arch/arm/mach-tegra/devices.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2010,2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * Erik Gilling <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __MACH_TEGRA_DEVICES_H +#define __MACH_TEGRA_DEVICES_H + +#include <linux/platform_device.h> + +extern struct platform_device tegra_sdhci_device1; +extern struct platform_device tegra_sdhci_device2; +extern struct platform_device tegra_sdhci_device3; +extern struct platform_device tegra_sdhci_device4; +extern struct platform_device tegra_i2c_device1; +extern struct platform_device tegra_i2c_device2; +extern struct platform_device tegra_i2c_device3; +extern struct platform_device tegra_i2c_device4; +extern struct platform_device tegra_spi_device1; +extern struct platform_device tegra_spi_device2; +extern struct platform_device tegra_spi_device3; +extern struct platform_device tegra_spi_device4; +extern struct platform_device tegra_ehci1_device; +extern struct platform_device tegra_ehci2_device; +extern struct platform_device tegra_ehci3_device; +extern struct platform_device tegra_uarta_device; +extern struct platform_device tegra_uartb_device; +extern struct platform_device tegra_uartc_device; +extern struct platform_device tegra_uartd_device; +extern struct platform_device tegra_uarte_device; +extern struct platform_device tegra_pmu_device; + +#endif diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c index edda6ec5e925..e945ae28ee77 100644 --- a/arch/arm/mach-tegra/dma.c +++ b/arch/arm/mach-tegra/dma.c @@ -27,9 +27,11 @@ #include <linux/err.h> #include <linux/irq.h> #include <linux/delay.h> +#include <linux/clk.h> #include <mach/dma.h> #include <mach/irqs.h> #include <mach/iomap.h> +#include <mach/suspend.h> #define APB_DMA_GEN 0x000 #define GEN_ENABLE (1<<31) @@ -120,17 +122,14 @@ struct tegra_dma_channel { void __iomem *addr; int mode; int irq; - - /* Register shadow */ - u32 csr; - u32 ahb_seq; - u32 ahb_ptr; - u32 apb_seq; - u32 apb_ptr; + int req_transfer_count; }; #define NV_DMA_MAX_CHANNELS 32 +static bool tegra_dma_initialized; +static DEFINE_MUTEX(tegra_dma_lock); + static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS); static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS]; @@ -138,7 +137,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, struct tegra_dma_req *req); static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, struct tegra_dma_req *req); -static void tegra_dma_init_hw(struct tegra_dma_channel *ch); static void tegra_dma_stop(struct tegra_dma_channel *ch); void tegra_dma_flush(struct tegra_dma_channel *ch) @@ -150,6 +148,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch) { struct tegra_dma_req *req; + if (tegra_dma_is_empty(ch)) + return; + req = list_entry(ch->list.next, typeof(*req), node); tegra_dma_dequeue_req(ch, req); @@ -158,10 +159,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch) void tegra_dma_stop(struct tegra_dma_channel *ch) { - unsigned int csr; - unsigned int status; + u32 csr; + u32 status; - csr = ch->csr; + csr = readl(ch->addr + APB_DMA_CHAN_CSR); csr &= ~CSR_IE_EOC; writel(csr, ch->addr + APB_DMA_CHAN_CSR); @@ -175,19 +176,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch) int tegra_dma_cancel(struct tegra_dma_channel *ch) { - unsigned int csr; + u32 csr; unsigned long irq_flags; spin_lock_irqsave(&ch->lock, irq_flags); while (!list_empty(&ch->list)) list_del(ch->list.next); - csr = ch->csr; + csr = readl(ch->addr + APB_DMA_CHAN_CSR); csr &= ~CSR_REQ_SEL_MASK; csr |= CSR_REQ_SEL_INVALID; - - /* Set the enable as that is not shadowed */ - csr |= CSR_ENB; writel(csr, ch->addr + APB_DMA_CHAN_CSR); tegra_dma_stop(ch); @@ -229,18 +227,15 @@ int 
tegra_dma_dequeue_req(struct tegra_dma_channel *ch, * - Finally stop or program the DMA to the next buffer in the * list. */ - csr = ch->csr; + csr = readl(ch->addr + APB_DMA_CHAN_CSR); csr &= ~CSR_REQ_SEL_MASK; csr |= CSR_REQ_SEL_INVALID; - - /* Set the enable as that is not shadowed */ - csr |= CSR_ENB; writel(csr, ch->addr + APB_DMA_CHAN_CSR); /* Get the transfer count */ status = readl(ch->addr + APB_DMA_CHAN_STA); to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT; - req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; + req_transfer_count = ch->req_transfer_count; req_transfer_count += 1; to_transfer += 1; @@ -318,6 +313,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, struct tegra_dma_req *req) { unsigned long irq_flags; + struct tegra_dma_req *_req; int start_dma = 0; if (req->size > NV_DMA_MAX_TRASFER_SIZE || @@ -328,6 +324,13 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, spin_lock_irqsave(&ch->lock, irq_flags); + list_for_each_entry(_req, &ch->list, node) { + if (req == _req) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return -EEXIST; + } + } + req->bytes_transferred = 0; req->status = 0; req->buffer_status = 0; @@ -348,7 +351,12 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req); struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) { int channel; - struct tegra_dma_channel *ch; + struct tegra_dma_channel *ch = NULL; + + if (WARN_ON(!tegra_dma_initialized)) + return NULL; + + mutex_lock(&tegra_dma_lock); /* first channel is the shared channel */ if (mode & TEGRA_DMA_SHARED) { @@ -357,11 +365,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) channel = find_first_zero_bit(channel_usage, ARRAY_SIZE(dma_channels)); if (channel >= ARRAY_SIZE(dma_channels)) - return NULL; + goto out; } __set_bit(channel, channel_usage); ch = &dma_channels[channel]; ch->mode = mode; + +out: + mutex_unlock(&tegra_dma_lock); return ch; } EXPORT_SYMBOL(tegra_dma_allocate_channel); @@ -371,22 +382,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch) if (ch->mode & TEGRA_DMA_SHARED) return; tegra_dma_cancel(ch); + mutex_lock(&tegra_dma_lock); __clear_bit(ch->id, channel_usage); + mutex_unlock(&tegra_dma_lock); } EXPORT_SYMBOL(tegra_dma_free_channel); static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, struct tegra_dma_req *req) { + u32 apb_ptr; + u32 ahb_ptr; + if (req->to_memory) { - ch->apb_ptr = req->source_addr; - ch->ahb_ptr = req->dest_addr; + apb_ptr = req->source_addr; + ahb_ptr = req->dest_addr; } else { - ch->apb_ptr = req->dest_addr; - ch->ahb_ptr = req->source_addr; + apb_ptr = req->dest_addr; + ahb_ptr = req->source_addr; } - writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); - writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); + writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); + writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); req->status = TEGRA_DMA_REQ_INFLIGHT; return; @@ -400,38 +416,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, int ahb_bus_width; int apb_bus_width; int index; - unsigned long csr; + u32 ahb_seq; + u32 apb_seq; + u32 ahb_ptr; + u32 apb_ptr; + u32 csr; + + csr = CSR_IE_EOC | CSR_FLOW; + ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1; + apb_seq = 0; - ch->csr |= CSR_FLOW; - ch->csr &= ~CSR_REQ_SEL_MASK; - ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT; - ch->ahb_seq &= ~AHB_SEQ_BURST_MASK; - ch->ahb_seq |= AHB_SEQ_BURST_1; + csr |= req->req_sel << CSR_REQ_SEL_SHIFT; /* One shot mode is always single buffered, * continuous mode is always double 
buffered * */ if (ch->mode & TEGRA_DMA_MODE_ONESHOT) { - ch->csr |= CSR_ONCE; - ch->ahb_seq &= ~AHB_SEQ_DBL_BUF; - ch->csr &= ~CSR_WCOUNT_MASK; - ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT; + csr |= CSR_ONCE; + ch->req_transfer_count = (req->size >> 2) - 1; } else { - ch->csr &= ~CSR_ONCE; - ch->ahb_seq |= AHB_SEQ_DBL_BUF; + ahb_seq |= AHB_SEQ_DBL_BUF; /* In double buffered mode, we set the size to half the * requested size and interrupt when half the buffer * is full */ - ch->csr &= ~CSR_WCOUNT_MASK; - ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT; + ch->req_transfer_count = (req->size >> 3) - 1; } + csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT; + if (req->to_memory) { - ch->csr &= ~CSR_DIR; - ch->apb_ptr = req->source_addr; - ch->ahb_ptr = req->dest_addr; + apb_ptr = req->source_addr; + ahb_ptr = req->dest_addr; apb_addr_wrap = req->source_wrap; ahb_addr_wrap = req->dest_wrap; @@ -439,9 +456,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, ahb_bus_width = req->dest_bus_width; } else { - ch->csr |= CSR_DIR; - ch->apb_ptr = req->dest_addr; - ch->ahb_ptr = req->source_addr; + csr |= CSR_DIR; + apb_ptr = req->dest_addr; + ahb_ptr = req->source_addr; apb_addr_wrap = req->dest_wrap; ahb_addr_wrap = req->source_wrap; @@ -460,8 +477,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, index++; } while (index < ARRAY_SIZE(apb_addr_wrap_table)); BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table)); - ch->apb_seq &= ~APB_SEQ_WRAP_MASK; - ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT; + apb_seq |= index << APB_SEQ_WRAP_SHIFT; /* set address wrap for AHB size */ index = 0; @@ -471,55 +487,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, index++; } while (index < ARRAY_SIZE(ahb_addr_wrap_table)); BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table)); - ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK; - ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT; + ahb_seq |= index << AHB_SEQ_WRAP_SHIFT; for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { if (bus_width_table[index] == ahb_bus_width) break; } BUG_ON(index == ARRAY_SIZE(bus_width_table)); - ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK; - ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT; + ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT; for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { if (bus_width_table[index] == apb_bus_width) break; } BUG_ON(index == ARRAY_SIZE(bus_width_table)); - ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK; - ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT; - - ch->csr |= CSR_IE_EOC; + apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT; - /* update hw registers with the shadow */ - writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR); - writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); - writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); - writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); - writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); + writel(csr, ch->addr + APB_DMA_CHAN_CSR); + writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); + writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); + writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); + writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); - csr = ch->csr | CSR_ENB; + csr |= CSR_ENB; writel(csr, ch->addr + APB_DMA_CHAN_CSR); req->status = TEGRA_DMA_REQ_INFLIGHT; } -static void tegra_dma_init_hw(struct tegra_dma_channel *ch) -{ - /* One shot with an interrupt to CPU after transfer */ - ch->csr = CSR_ONCE | CSR_IE_EOC; - ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB; - ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << 
APB_SEQ_WRAP_SHIFT; -} - static void handle_oneshot_dma(struct tegra_dma_channel *ch) { struct tegra_dma_req *req; + unsigned long irq_flags; - spin_lock(&ch->lock); + spin_lock_irqsave(&ch->lock, irq_flags); if (list_empty(&ch->list)) { - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); return; } @@ -527,8 +530,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) if (req) { int bytes_transferred; - bytes_transferred = - (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; + bytes_transferred = ch->req_transfer_count; bytes_transferred += 1; bytes_transferred <<= 2; @@ -536,12 +538,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) req->bytes_transferred = bytes_transferred; req->status = TEGRA_DMA_REQ_SUCCESS; - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); /* Callback should be called without any lock */ pr_debug("%s: transferred %d bytes\n", __func__, req->bytes_transferred); req->complete(req); - spin_lock(&ch->lock); + spin_lock_irqsave(&ch->lock, irq_flags); } if (!list_empty(&ch->list)) { @@ -551,22 +553,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) if (req->status != TEGRA_DMA_REQ_INFLIGHT) tegra_dma_update_hw(ch, req); } - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); } static void handle_continuous_dma(struct tegra_dma_channel *ch) { struct tegra_dma_req *req; + unsigned long irq_flags; - spin_lock(&ch->lock); + spin_lock_irqsave(&ch->lock, irq_flags); if (list_empty(&ch->list)) { - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); return; } req = list_entry(ch->list.next, typeof(*req), node); if (req) { if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) { + bool is_dma_ping_complete; + is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA) + & STA_PING_PONG) ? 
true : false; + if (req->to_memory) + is_dma_ping_complete = !is_dma_ping_complete; + /* Out of sync - Release current buffer */ + if (!is_dma_ping_complete) { + int bytes_transferred; + + bytes_transferred = ch->req_transfer_count; + bytes_transferred += 1; + bytes_transferred <<= 3; + req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL; + req->bytes_transferred = bytes_transferred; + req->status = TEGRA_DMA_REQ_SUCCESS; + tegra_dma_stop(ch); + + if (!list_is_last(&req->node, &ch->list)) { + struct tegra_dma_req *next_req; + + next_req = list_entry(req->node.next, + typeof(*next_req), node); + tegra_dma_update_hw(ch, next_req); + } + + list_del(&req->node); + + /* DMA lock is NOT held when callbak is called */ + spin_unlock_irqrestore(&ch->lock, irq_flags); + req->complete(req); + return; + } /* Load the next request into the hardware, if available * */ if (!list_is_last(&req->node, &ch->list)) { @@ -579,7 +614,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL; req->status = TEGRA_DMA_REQ_SUCCESS; /* DMA lock is NOT held when callback is called */ - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); if (likely(req->threshold)) req->threshold(req); return; @@ -590,8 +625,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) * the second interrupt */ int bytes_transferred; - bytes_transferred = - (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; + bytes_transferred = ch->req_transfer_count; bytes_transferred += 1; bytes_transferred <<= 3; @@ -601,7 +635,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) list_del(&req->node); /* DMA lock is NOT held when callbak is called */ - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); req->complete(req); return; @@ -609,7 +643,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) BUG(); } } - spin_unlock(&ch->lock); + spin_unlock_irqrestore(&ch->lock, irq_flags); } static irqreturn_t dma_isr(int irq, void *data) @@ -646,6 +680,21 @@ int __init tegra_dma_init(void) int i; unsigned int irq; void __iomem *addr; + struct clk *c; + + bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS); + + c = clk_get_sys("tegra-dma", NULL); + if (IS_ERR(c)) { + pr_err("Unable to get clock for APB DMA\n"); + ret = PTR_ERR(c); + goto fail; + } + ret = clk_enable(c); + if (ret != 0) { + pr_err("Unable to enable clock for APB DMA\n"); + goto fail; + } addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); writel(GEN_ENABLE, addr + APB_DMA_GEN); @@ -653,18 +702,9 @@ int __init tegra_dma_init(void) writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX), addr + APB_DMA_IRQ_MASK_SET); - memset(channel_usage, 0, sizeof(channel_usage)); - memset(dma_channels, 0, sizeof(dma_channels)); - - /* Reserve all the channels we are not supposed to touch */ - for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++) - __set_bit(i, channel_usage); - for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { struct tegra_dma_channel *ch = &dma_channels[i]; - __clear_bit(i, channel_usage); - ch->id = i; snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); @@ -673,7 +713,6 @@ int __init tegra_dma_init(void) spin_lock_init(&ch->lock); INIT_LIST_HEAD(&ch->list); - tegra_dma_init_hw(ch); irq = INT_APB_DMA_CH0 + i; ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0, @@ -684,14 +723,15 @@ int __init tegra_dma_init(void) goto fail; } ch->irq = irq; + + __clear_bit(i, channel_usage); } /* mark the shared channel allocated */ 
__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage); - for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++) - __set_bit(i, channel_usage); + tegra_dma_initialized = true; - return ret; + return 0; fail: writel(0, addr + APB_DMA_GEN); for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { @@ -701,6 +741,7 @@ fail: } return ret; } +postcore_initcall(tegra_dma_init); #ifdef CONFIG_PM static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3]; diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c index ad8048801513..12090a2cf3e0 100644 --- a/arch/arm/mach-tegra/gpio.c +++ b/arch/arm/mach-tegra/gpio.c @@ -25,6 +25,7 @@ #include <linux/gpio.h> #include <mach/iomap.h> +#include <mach/suspend.h> #define GPIO_BANK(x) ((x) >> 5) #define GPIO_PORT(x) (((x) >> 3) & 0x3) @@ -380,6 +381,20 @@ static int __init tegra_gpio_init(void) postcore_initcall(tegra_gpio_init); +void __init tegra_gpio_config(struct tegra_gpio_table *table, int num) +{ + int i; + + for (i = 0; i < num; i++) { + int gpio = table[i].gpio; + + if (table[i].enable) + tegra_gpio_enable(gpio); + else + tegra_gpio_disable(gpio); + } +} + #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h index a217f68ba57c..c8baf8f80d23 100644 --- a/arch/arm/mach-tegra/include/mach/clk.h +++ b/arch/arm/mach-tegra/include/mach/clk.h @@ -25,9 +25,7 @@ struct clk; void tegra_periph_reset_deassert(struct clk *c); void tegra_periph_reset_assert(struct clk *c); -int clk_enable_cansleep(struct clk *clk); -void clk_disable_cansleep(struct clk *clk); -int clk_set_rate_cansleep(struct clk *clk, unsigned long rate); -int clk_set_parent_cansleep(struct clk *clk, struct clk *parent); +unsigned long clk_get_rate_all_locked(struct clk *c); +void tegra_sdmmc_tap_delay(struct clk *c, int delay); #endif diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S index a0e7c12868bd..e0ebe65c1657 100644 --- a/arch/arm/mach-tegra/include/mach/debug-macro.S +++ b/arch/arm/mach-tegra/include/mach/debug-macro.S @@ -19,30 +19,15 @@ */ #include <mach/io.h> +#include <mach/iomap.h> .macro addruart, rp, rv ldr \rp, =IO_APB_PHYS @ physical ldr \rv, =IO_APB_VIRT @ virtual -#if defined(CONFIG_TEGRA_DEBUG_UART_NONE) -#error "A debug UART must be selected in the kernel config to use DEBUG_LL" -#elif defined(CONFIG_TEGRA_DEBUG_UARTA) - orr \rp, \rp, #0x6000 - orr \rv, \rv, #0x6000 -#elif defined(CONFIG_TEGRA_DEBUG_UARTB) - orr \rp, \rp, #0x6000 - orr \rp, \rp, #0x40 - orr \rv, \rv, #0x6000 - orr \rv, \rv, #0x40 -#elif defined(CONFIG_TEGRA_DEBUG_UARTC) - orr \rp, \rp, #0x6200 - orr \rv, \rv, #0x6200 -#elif defined(CONFIG_TEGRA_DEBUG_UARTD) - orr \rp, \rp, #0x6300 - orr \rv, \rv, #0x6300 -#elif defined(CONFIG_TEGRA_DEBUG_UARTE) - orr \rp, \rp, #0x6400 - orr \rv, \rv, #0x6400 -#endif + orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF) + orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF00) + orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF) + orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF00) .endm #define UART_SHIFT 2 diff --git a/arch/arm/mach-tegra/include/mach/gpio.h b/arch/arm/mach-tegra/include/mach/gpio.h index e31f486d69a2..196f114dc241 100644 --- a/arch/arm/mach-tegra/include/mach/gpio.h +++ b/arch/arm/mach-tegra/include/mach/gpio.h @@ -20,6 +20,7 @@ #ifndef __MACH_TEGRA_GPIO_H #define __MACH_TEGRA_GPIO_H +#include <linux/init.h> #include <mach/irqs.h> #define TEGRA_NR_GPIOS INT_GPIO_NR @@ -31,7 +32,7 @@ #define gpio_cansleep 
__gpio_cansleep #define TEGRA_GPIO_TO_IRQ(gpio) (INT_GPIO_BASE + (gpio)) -#define TEGRA_IRQ_TO_GPIO(irq) ((gpio) - INT_GPIO_BASE) +#define TEGRA_IRQ_TO_GPIO(irq) ((irq) - INT_GPIO_BASE) static inline int gpio_to_irq(unsigned int gpio) { @@ -47,6 +48,12 @@ static inline int irq_to_gpio(unsigned int irq) return -EINVAL; } +struct tegra_gpio_table { + int gpio; /* GPIO number */ + bool enable; /* Enable for GPIO at init? */ +}; + +void tegra_gpio_config(struct tegra_gpio_table *table, int num); void tegra_gpio_enable(int gpio); void tegra_gpio_disable(int gpio); diff --git a/arch/arm/mach-tegra/include/mach/harmony_audio.h b/arch/arm/mach-tegra/include/mach/harmony_audio.h new file mode 100644 index 000000000000..af086500ab7d --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/harmony_audio.h @@ -0,0 +1,22 @@ +/* + * arch/arm/mach-tegra/include/mach/harmony_audio.h + * + * Copyright 2011 NVIDIA, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +struct harmony_audio_platform_data { + int gpio_spkr_en; + int gpio_hp_det; + int gpio_int_mic_en; + int gpio_ext_mic_en; +}; diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h index 44a4f4bcf91f..691cdabd69cf 100644 --- a/arch/arm/mach-tegra/include/mach/iomap.h +++ b/arch/arm/mach-tegra/include/mach/iomap.h @@ -26,6 +26,9 @@ #define TEGRA_IRAM_BASE 0x40000000 #define TEGRA_IRAM_SIZE SZ_256K +#define TEGRA_HOST1X_BASE 0x50000000 +#define TEGRA_HOST1X_SIZE 0x24000 + #define TEGRA_ARM_PERIF_BASE 0x50040000 #define TEGRA_ARM_PERIF_SIZE SZ_8K @@ -35,12 +38,30 @@ #define TEGRA_ARM_INT_DIST_BASE 0x50041000 #define TEGRA_ARM_INT_DIST_SIZE SZ_4K +#define TEGRA_MPE_BASE 0x54040000 +#define TEGRA_MPE_SIZE SZ_256K + +#define TEGRA_VI_BASE 0x54080000 +#define TEGRA_VI_SIZE SZ_256K + +#define TEGRA_ISP_BASE 0x54100000 +#define TEGRA_ISP_SIZE SZ_256K + #define TEGRA_DISPLAY_BASE 0x54200000 #define TEGRA_DISPLAY_SIZE SZ_256K #define TEGRA_DISPLAY2_BASE 0x54240000 #define TEGRA_DISPLAY2_SIZE SZ_256K +#define TEGRA_HDMI_BASE 0x54280000 +#define TEGRA_HDMI_SIZE SZ_256K + +#define TEGRA_GART_BASE 0x58000000 +#define TEGRA_GART_SIZE SZ_32M + +#define TEGRA_RES_SEMA_BASE 0x60001000 +#define TEGRA_RES_SEMA_SIZE SZ_4K + #define TEGRA_PRIMARY_ICTLR_BASE 0x60004000 #define TEGRA_PRIMARY_ICTLR_SIZE SZ_64 @@ -140,6 +161,18 @@ #define TEGRA_PWFM_BASE 0x7000A000 #define TEGRA_PWFM_SIZE SZ_256 +#define TEGRA_PWFM0_BASE 0x7000A000 +#define TEGRA_PWFM0_SIZE 4 + +#define TEGRA_PWFM1_BASE 0x7000A010 +#define TEGRA_PWFM1_SIZE 4 + +#define TEGRA_PWFM2_BASE 0x7000A020 +#define TEGRA_PWFM2_SIZE 4 + +#define TEGRA_PWFM3_BASE 0x7000A030 +#define TEGRA_PWFM3_SIZE 4 + #define TEGRA_MIPI_BASE 0x7000B000 #define TEGRA_MIPI_SIZE SZ_256 @@ -221,4 +254,18 @@ #define TEGRA_SDMMC4_BASE 0xC8000600 #define TEGRA_SDMMC4_SIZE SZ_512 +#if defined(CONFIG_TEGRA_DEBUG_UART_NONE) +# define TEGRA_DEBUG_UART_BASE 0 +#elif defined(CONFIG_TEGRA_DEBUG_UARTA) +# define TEGRA_DEBUG_UART_BASE TEGRA_UARTA_BASE +#elif defined(CONFIG_TEGRA_DEBUG_UARTB) +# define TEGRA_DEBUG_UART_BASE TEGRA_UARTB_BASE +#elif defined(CONFIG_TEGRA_DEBUG_UARTC) +# 
define TEGRA_DEBUG_UART_BASE TEGRA_UARTC_BASE +#elif defined(CONFIG_TEGRA_DEBUG_UARTD) +# define TEGRA_DEBUG_UART_BASE TEGRA_UARTD_BASE +#elif defined(CONFIG_TEGRA_DEBUG_UARTE) +# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE +#endif + #endif diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h index 71bbf3422953..73265af4dda3 100644 --- a/arch/arm/mach-tegra/include/mach/irqs.h +++ b/arch/arm/mach-tegra/include/mach/irqs.h @@ -88,7 +88,7 @@ #define INT_SYS_STATS_MON (INT_SEC_BASE + 22) #define INT_GPIO5 (INT_SEC_BASE + 23) #define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24) -#define INT_CPU2_PMU_INTR (INT_SEC_BASE + 25) +#define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25) #define INT_SEC_RES_26 (INT_SEC_BASE + 26) #define INT_S_LINK1 (INT_SEC_BASE + 27) #define INT_APB_DMA_COP (INT_SEC_BASE + 28) @@ -166,10 +166,18 @@ #define INT_QUAD_RES_30 (INT_QUAD_BASE + 30) #define INT_QUAD_RES_31 (INT_QUAD_BASE + 31) -#define INT_GPIO_BASE (INT_QUAD_BASE + 32) +#define INT_MAIN_NR (INT_QUAD_BASE + 32 - INT_PRI_BASE) + +#define INT_GPIO_BASE (INT_PRI_BASE + INT_MAIN_NR) + #define INT_GPIO_NR (28 * 8) -#define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR) +#define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR) + +#define INT_BOARD_BASE TEGRA_NR_IRQS +#define NR_BOARD_IRQS 32 + +#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS) #endif #endif diff --git a/arch/arm/mach-tegra/include/mach/legacy_irq.h b/arch/arm/mach-tegra/include/mach/legacy_irq.h index db1eb3dd04c8..d898c0e3d905 100644 --- a/arch/arm/mach-tegra/include/mach/legacy_irq.h +++ b/arch/arm/mach-tegra/include/mach/legacy_irq.h @@ -27,5 +27,9 @@ int tegra_legacy_force_irq_status(unsigned int irq); void tegra_legacy_select_fiq(unsigned int irq, bool fiq); unsigned long tegra_legacy_vfiq(int nr); unsigned long tegra_legacy_class(int nr); +int tegra_legacy_irq_set_wake(int irq, int enable); +void tegra_legacy_irq_set_lp1_wake_mask(void); +void tegra_legacy_irq_restore_mask(void); +void tegra_init_legacy_irq(void); #endif diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t2.h b/arch/arm/mach-tegra/include/mach/pinmux-t2.h index e5b9d740f973..4c2626347263 100644 --- a/arch/arm/mach-tegra/include/mach/pinmux-t2.h +++ b/arch/arm/mach-tegra/include/mach/pinmux-t2.h @@ -167,6 +167,16 @@ enum tegra_drive_pingroup { TEGRA_DRIVE_PINGROUP_XM2D, TEGRA_DRIVE_PINGROUP_XM2CLK, TEGRA_DRIVE_PINGROUP_MEMCOMP, + TEGRA_DRIVE_PINGROUP_SDIO1, + TEGRA_DRIVE_PINGROUP_CRT, + TEGRA_DRIVE_PINGROUP_DDC, + TEGRA_DRIVE_PINGROUP_GMA, + TEGRA_DRIVE_PINGROUP_GMB, + TEGRA_DRIVE_PINGROUP_GMC, + TEGRA_DRIVE_PINGROUP_GMD, + TEGRA_DRIVE_PINGROUP_GME, + TEGRA_DRIVE_PINGROUP_OWR, + TEGRA_DRIVE_PINGROUP_UAD, TEGRA_MAX_DRIVE_PINGROUP, }; diff --git a/arch/arm/mach-tegra/include/mach/powergate.h b/arch/arm/mach-tegra/include/mach/powergate.h new file mode 100644 index 000000000000..401d1b725291 --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/powergate.h @@ -0,0 +1,40 @@ +/* + * drivers/regulator/tegra-regulator.c + * + * Copyright (c) 2010 Google, Inc + * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MACH_TEGRA_POWERGATE_H_ +#define _MACH_TEGRA_POWERGATE_H_ + +#define TEGRA_POWERGATE_CPU 0 +#define TEGRA_POWERGATE_3D 1 +#define TEGRA_POWERGATE_VENC 2 +#define TEGRA_POWERGATE_PCIE 3 +#define TEGRA_POWERGATE_VDEC 4 +#define TEGRA_POWERGATE_L2 5 +#define TEGRA_POWERGATE_MPE 6 +#define TEGRA_NUM_POWERGATE 7 + +int tegra_powergate_power_on(int id); +int tegra_powergate_power_off(int id); +bool tegra_powergate_is_powered(int id); +int tegra_powergate_remove_clamping(int id); + +/* Must be called with clk disabled, and returns with clk enabled */ +int tegra_powergate_sequence_power_up(int id, struct clk *clk); + +#endif /* _MACH_TEGRA_POWERGATE_H_ */ diff --git a/arch/arm/mach-tegra/include/mach/suspend.h b/arch/arm/mach-tegra/include/mach/suspend.h new file mode 100644 index 000000000000..5af8715d2e1e --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/suspend.h @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-tegra/include/mach/suspend.h + * + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#ifndef _MACH_TEGRA_SUSPEND_H_ +#define _MACH_TEGRA_SUSPEND_H_ + +void tegra_pinmux_suspend(void); +void tegra_irq_suspend(void); +void tegra_gpio_suspend(void); +void tegra_clk_suspend(void); +void tegra_dma_suspend(void); +void tegra_timer_suspend(void); + +void tegra_pinmux_resume(void); +void tegra_irq_resume(void); +void tegra_gpio_resume(void); +void tegra_clk_resume(void); +void tegra_dma_resume(void); +void tegra_timer_resume(void); + +#endif /* _MACH_TEGRA_SUSPEND_H_ */ diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h index 84d5d46113f7..d0183d876c3b 100644 --- a/arch/arm/mach-tegra/include/mach/system.h +++ b/arch/arm/mach-tegra/include/mach/system.h @@ -24,16 +24,10 @@ #include <mach/hardware.h> #include <mach/iomap.h> -static inline void arch_idle(void) -{ -} +extern void (*arch_reset)(char mode, const char *cmd); -static inline void arch_reset(char mode, const char *cmd) +static inline void arch_idle(void) { - void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04); - u32 reg = readl(reset); - reg |= 0x04; - writel(reg, reset); } #endif diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h index 6c4dd815abd7..4e8323770c79 100644 --- a/arch/arm/mach-tegra/include/mach/uncompress.h +++ b/arch/arm/mach-tegra/include/mach/uncompress.h @@ -26,23 +26,9 @@ #include <mach/iomap.h> -#if defined(CONFIG_TEGRA_DEBUG_UARTA) -#define DEBUG_UART_BASE TEGRA_UARTA_BASE -#elif defined(CONFIG_TEGRA_DEBUG_UARTB) -#define DEBUG_UART_BASE TEGRA_UARTB_BASE -#elif defined(CONFIG_TEGRA_DEBUG_UARTC) -#define DEBUG_UART_BASE TEGRA_UARTC_BASE -#elif defined(CONFIG_TEGRA_DEBUG_UARTD) -#define DEBUG_UART_BASE TEGRA_UARTD_BASE -#elif defined(CONFIG_TEGRA_DEBUG_UARTE) -#define DEBUG_UART_BASE TEGRA_UARTE_BASE -#else -#define DEBUG_UART_BASE NULL -#endif - static void putc(int c) { - volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE; + volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE; int 
shift = 2; if (uart == NULL) @@ -59,7 +45,7 @@ static inline void flush(void) static inline void arch_decomp_setup(void) { - volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE; + volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE; int shift = 2; if (uart == NULL) diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h new file mode 100644 index 000000000000..d4b8f9e298a8 --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/usb_phy.h @@ -0,0 +1,86 @@ +/* + * arch/arm/mach-tegra/include/mach/usb_phy.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MACH_USB_PHY_H +#define __MACH_USB_PHY_H + +#include <linux/clk.h> +#include <linux/usb/otg.h> + +struct tegra_utmip_config { + u8 hssync_start_delay; + u8 elastic_limit; + u8 idle_wait_delay; + u8 term_range_adj; + u8 xcvr_setup; + u8 xcvr_lsfslew; + u8 xcvr_lsrslew; +}; + +struct tegra_ulpi_config { + int reset_gpio; + const char *clk; +}; + +enum tegra_usb_phy_port_speed { + TEGRA_USB_PHY_PORT_SPEED_FULL = 0, + TEGRA_USB_PHY_PORT_SPEED_LOW, + TEGRA_USB_PHY_PORT_SPEED_HIGH, +}; + +enum tegra_usb_phy_mode { + TEGRA_USB_PHY_MODE_DEVICE, + TEGRA_USB_PHY_MODE_HOST, +}; + +struct tegra_xtal_freq; + +struct tegra_usb_phy { + int instance; + const struct tegra_xtal_freq *freq; + void __iomem *regs; + void __iomem *pad_regs; + struct clk *clk; + struct clk *pll_u; + struct clk *pad_clk; + enum tegra_usb_phy_mode mode; + void *config; + struct otg_transceiver *ulpi; +}; + +struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, + void *config, enum tegra_usb_phy_mode phy_mode); + +int tegra_usb_phy_power_on(struct tegra_usb_phy *phy); + +void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy); + +void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy); + +void tegra_usb_phy_power_off(struct tegra_usb_phy *phy); + +void tegra_usb_phy_preresume(struct tegra_usb_phy *phy); + +void tegra_usb_phy_postresume(struct tegra_usb_phy *phy); + +void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy, + enum tegra_usb_phy_port_speed port_speed); + +void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy); + +void tegra_usb_phy_close(struct tegra_usb_phy *phy); + +#endif /* __MACH_USB_PHY_H */ diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index 17c74d21077c..dfbc219ea492 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c @@ -18,6 +18,7 @@ */ #include <linux/kernel.h> +#include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> @@ -26,73 +27,119 @@ #include <asm/hardware/gic.h> #include <mach/iomap.h> +#include <mach/legacy_irq.h> +#include <mach/suspend.h> #include "board.h" -#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE) -#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE) -#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ) +#define PMC_CTRL 0x0 +#define PMC_CTRL_LATCH_WAKEUPS (1 << 5) +#define PMC_WAKE_MASK 0xc +#define PMC_WAKE_LEVEL 0x10 +#define PMC_WAKE_STATUS 0x14 +#define PMC_SW_WAKE_STATUS 0x18 +#define PMC_DPD_SAMPLE 0x20 
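/*
 * Illustrative sketch, not part of this patch: a board that wants pad 0
 * to wake on its configured level and pad 3 to wake on either edge would
 * program the PMC wake registers defined above through
 * tegra_set_lp0_wake_pads(), roughly:
 *
 *	u32 enb   = (1 << 0) | (1 << 3);
 *	u32 level = (1 << 0);
 *	u32 any   = (1 << 3);
 *
 *	tegra_set_lp0_wake_pads(enb, level, any);
 *
 * The pad numbers are hypothetical; the argument handling follows the
 * wake_enb/wake_level/wake_any logic in tegra_set_lp0_wake_pads() below.
 */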
-#define APBDMA_IRQ_STA_CPU 0x14 -#define APBDMA_IRQ_MASK_SET 0x20 -#define APBDMA_IRQ_MASK_CLR 0x24 +static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); -#define ICTLR_CPU_IER 0x20 -#define ICTLR_CPU_IER_SET 0x24 -#define ICTLR_CPU_IER_CLR 0x28 -#define ICTLR_CPU_IEP_CLASS 0x2c -#define ICTLR_COP_IER 0x30 -#define ICTLR_COP_IER_SET 0x34 -#define ICTLR_COP_IER_CLR 0x38 -#define ICTLR_COP_IEP_CLASS 0x3c +static u32 tegra_lp0_wake_enb; +static u32 tegra_lp0_wake_level; +static u32 tegra_lp0_wake_level_any; static void (*tegra_gic_mask_irq)(struct irq_data *d); static void (*tegra_gic_unmask_irq)(struct irq_data *d); +static void (*tegra_gic_ack_irq)(struct irq_data *d); -#define irq_to_ictlr(irq) (((irq) - 32) >> 5) -static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE); -#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100) +/* ensures that sufficient time is passed for a register write to + * serialize into the 32KHz domain */ +static void pmc_32kwritel(u32 val, unsigned long offs) +{ + writel(val, pmc + offs); + udelay(130); +} + +int tegra_set_lp1_wake(int irq, int enable) +{ + return tegra_legacy_irq_set_wake(irq, enable); +} + +void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any) +{ + u32 temp; + u32 status; + u32 lvl; + + wake_level &= wake_enb; + wake_any &= wake_enb; + + wake_level |= (tegra_lp0_wake_level & tegra_lp0_wake_enb); + wake_any |= (tegra_lp0_wake_level_any & tegra_lp0_wake_enb); + + wake_enb |= tegra_lp0_wake_enb; + + pmc_32kwritel(0, PMC_SW_WAKE_STATUS); + temp = readl(pmc + PMC_CTRL); + temp |= PMC_CTRL_LATCH_WAKEUPS; + pmc_32kwritel(temp, PMC_CTRL); + temp &= ~PMC_CTRL_LATCH_WAKEUPS; + pmc_32kwritel(temp, PMC_CTRL); + status = readl(pmc + PMC_SW_WAKE_STATUS); + lvl = readl(pmc + PMC_WAKE_LEVEL); + + /* flip the wakeup trigger for any-edge triggered pads + * which are currently asserting as wakeups */ + lvl ^= status; + lvl &= wake_any; + + wake_level |= lvl; + + writel(wake_level, pmc + PMC_WAKE_LEVEL); + /* Enable DPD sample to trigger sampling pads data and direction + * in which pad will be driven during lp0 mode*/ + writel(0x1, pmc + PMC_DPD_SAMPLE); + + writel(wake_enb, pmc + PMC_WAKE_MASK); +} static void tegra_mask(struct irq_data *d) { - void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq)); tegra_gic_mask_irq(d); - writel(1 << (d->irq & 31), addr+ICTLR_CPU_IER_CLR); + tegra_legacy_mask_irq(d->irq); } static void tegra_unmask(struct irq_data *d) { - void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq)); tegra_gic_unmask_irq(d); - writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET); + tegra_legacy_unmask_irq(d->irq); } -#ifdef CONFIG_PM +static void tegra_ack(struct irq_data *d) +{ + tegra_legacy_force_irq_clr(d->irq); + tegra_gic_ack_irq(d); +} -static int tegra_set_wake(struct irq_data *d, unsigned int on) +static int tegra_retrigger(struct irq_data *d) { - return 0; + tegra_legacy_force_irq_set(d->irq); + return 1; } -#endif static struct irq_chip tegra_irq = { - .name = "PPI", - .irq_mask = tegra_mask, - .irq_unmask = tegra_unmask, -#ifdef CONFIG_PM - .irq_set_wake = tegra_set_wake, -#endif + .name = "PPI", + .irq_ack = tegra_ack, + .irq_mask = tegra_mask, + .irq_unmask = tegra_unmask, + .irq_retrigger = tegra_retrigger, }; void __init tegra_init_irq(void) { struct irq_chip *gic; unsigned int i; + int irq; - for (i = 0; i < PPI_NR; i++) { - writel(~0, ictlr_to_virt(i) + ICTLR_CPU_IER_CLR); - writel(0, ictlr_to_virt(i) + ICTLR_CPU_IEP_CLASS); - } + tegra_init_legacy_irq(); gic_init(0, 29, 
IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); @@ -100,72 +147,15 @@ void __init tegra_init_irq(void) gic = get_irq_chip(29); tegra_gic_unmask_irq = gic->irq_unmask; tegra_gic_mask_irq = gic->irq_mask; - tegra_irq.irq_ack = gic->irq_ack; + tegra_gic_ack_irq = gic->irq_ack; #ifdef CONFIG_SMP tegra_irq.irq_set_affinity = gic->irq_set_affinity; #endif - for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { - set_irq_chip(i, &tegra_irq); - set_irq_handler(i, handle_level_irq); - set_irq_flags(i, IRQF_VALID); + for (i = 0; i < INT_MAIN_NR; i++) { + irq = INT_PRI_BASE + i; + set_irq_chip(irq, &tegra_irq); + set_irq_handler(irq, handle_level_irq); + set_irq_flags(irq, IRQF_VALID); } } - -#ifdef CONFIG_PM -static u32 cop_ier[PPI_NR]; -static u32 cpu_ier[PPI_NR]; -static u32 cpu_iep[PPI_NR]; - -void tegra_irq_suspend(void) -{ - unsigned long flags; - int i; - - for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { - struct irq_desc *desc = irq_to_desc(i); - if (!desc) - continue; - if (desc->status & IRQ_WAKEUP) { - pr_debug("irq %d is wakeup\n", i); - continue; - } - disable_irq(i); - } - - local_irq_save(flags); - for (i = 0; i < PPI_NR; i++) { - void __iomem *ictlr = ictlr_to_virt(i); - cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER); - cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS); - cop_ier[i] = readl(ictlr + ICTLR_COP_IER); - writel(~0, ictlr + ICTLR_COP_IER_CLR); - } - local_irq_restore(flags); -} - -void tegra_irq_resume(void) -{ - unsigned long flags; - int i; - - local_irq_save(flags); - for (i = 0; i < PPI_NR; i++) { - void __iomem *ictlr = ictlr_to_virt(i); - writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); - writel(~0ul, ictlr + ICTLR_CPU_IER_CLR); - writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); - writel(0, ictlr + ICTLR_COP_IEP_CLASS); - writel(~0ul, ictlr + ICTLR_COP_IER_CLR); - writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET); - } - local_irq_restore(flags); - - for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { - struct irq_desc *desc = irq_to_desc(i); - if (!desc || (desc->status & IRQ_WAKEUP)) - continue; - enable_irq(i); - } -} -#endif diff --git a/arch/arm/mach-tegra/legacy_irq.c b/arch/arm/mach-tegra/legacy_irq.c index 7cc8601c19ff..38eb719a4f53 100644 --- a/arch/arm/mach-tegra/legacy_irq.c +++ b/arch/arm/mach-tegra/legacy_irq.c @@ -18,17 +18,30 @@ #include <linux/io.h> #include <linux/kernel.h> #include <mach/iomap.h> +#include <mach/irqs.h> #include <mach/legacy_irq.h> -#define ICTLR_CPU_IER 0x20 -#define ICTLR_CPU_IER_SET 0x24 -#define ICTLR_CPU_IER_CLR 0x28 -#define ICTLR_CPU_IEP_CLASS 0x2C +#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE) +#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE) +#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ) + #define ICTLR_CPU_IEP_VFIQ 0x08 #define ICTLR_CPU_IEP_FIR 0x14 #define ICTLR_CPU_IEP_FIR_SET 0x18 #define ICTLR_CPU_IEP_FIR_CLR 0x1c +#define ICTLR_CPU_IER 0x20 +#define ICTLR_CPU_IER_SET 0x24 +#define ICTLR_CPU_IER_CLR 0x28 +#define ICTLR_CPU_IEP_CLASS 0x2C + +#define ICTLR_COP_IER 0x30 +#define ICTLR_COP_IER_SET 0x34 +#define ICTLR_COP_IER_CLR 0x38 +#define ICTLR_COP_IEP_CLASS 0x3c + +#define NUM_ICTLRS 4 + static void __iomem *ictlr_reg_base[] = { IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE), IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE), @@ -36,6 +49,9 @@ static void __iomem *ictlr_reg_base[] = { IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE), }; +static u32 tegra_legacy_wake_mask[4]; +static u32 tegra_legacy_saved_mask[4]; + /* When going into deep sleep, the CPU is powered down, taking the GIC with it In order to wake, the wake 
interrupts need to be enabled in the legacy interrupt controller. */ @@ -112,3 +128,88 @@ unsigned long tegra_legacy_class(int nr) base = ictlr_reg_base[nr]; return readl(base + ICTLR_CPU_IEP_CLASS); } + +int tegra_legacy_irq_set_wake(int irq, int enable) +{ + irq -= 32; + if (enable) + tegra_legacy_wake_mask[irq >> 5] |= 1 << (irq & 31); + else + tegra_legacy_wake_mask[irq >> 5] &= ~(1 << (irq & 31)); + + return 0; +} + +void tegra_legacy_irq_set_lp1_wake_mask(void) +{ + void __iomem *base; + int i; + + for (i = 0; i < NUM_ICTLRS; i++) { + base = ictlr_reg_base[i]; + tegra_legacy_saved_mask[i] = readl(base + ICTLR_CPU_IER); + writel(tegra_legacy_wake_mask[i], base + ICTLR_CPU_IER); + } +} + +void tegra_legacy_irq_restore_mask(void) +{ + void __iomem *base; + int i; + + for (i = 0; i < NUM_ICTLRS; i++) { + base = ictlr_reg_base[i]; + writel(tegra_legacy_saved_mask[i], base + ICTLR_CPU_IER); + } +} + +void tegra_init_legacy_irq(void) +{ + int i; + + for (i = 0; i < NUM_ICTLRS; i++) { + void __iomem *ictlr = ictlr_reg_base[i]; + writel(~0, ictlr + ICTLR_CPU_IER_CLR); + writel(0, ictlr + ICTLR_CPU_IEP_CLASS); + } +} + +#ifdef CONFIG_PM +static u32 cop_ier[NUM_ICTLRS]; +static u32 cpu_ier[NUM_ICTLRS]; +static u32 cpu_iep[NUM_ICTLRS]; + +void tegra_irq_suspend(void) +{ + unsigned long flags; + int i; + + local_irq_save(flags); + for (i = 0; i < NUM_ICTLRS; i++) { + void __iomem *ictlr = ictlr_reg_base[i]; + cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER); + cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS); + cop_ier[i] = readl(ictlr + ICTLR_COP_IER); + writel(~0, ictlr + ICTLR_COP_IER_CLR); + } + local_irq_restore(flags); +} + +void tegra_irq_resume(void) +{ + unsigned long flags; + int i; + + local_irq_save(flags); + for (i = 0; i < NUM_ICTLRS; i++) { + void __iomem *ictlr = ictlr_reg_base[i]; + writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); + writel(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); + writel(0, ictlr + ICTLR_COP_IEP_CLASS); + writel(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET); + } + local_irq_restore(flags); +} +#endif diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c index 53f5fa37014a..2941212b853c 100644 --- a/arch/arm/mach-tegra/pcie.c +++ b/arch/arm/mach-tegra/pcie.c @@ -39,6 +39,7 @@ #include <mach/pinmux.h> #include <mach/iomap.h> #include <mach/clk.h> +#include <mach/powergate.h> /* register definitions */ #define AFI_OFFSET 0x3800 @@ -682,24 +683,41 @@ static void tegra_pcie_xclk_clamp(bool clamp) pmc_writel(reg, PMC_SCRATCH42); } -static int tegra_pcie_power_on(void) +static void tegra_pcie_power_off(void) { - tegra_pcie_xclk_clamp(true); tegra_periph_reset_assert(tegra_pcie.pcie_xclk); - tegra_pcie_xclk_clamp(false); + tegra_periph_reset_assert(tegra_pcie.afi_clk); + tegra_periph_reset_assert(tegra_pcie.pex_clk); - clk_enable(tegra_pcie.afi_clk); - clk_enable(tegra_pcie.pex_clk); - return clk_enable(tegra_pcie.pll_e); + tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); + tegra_pcie_xclk_clamp(true); } -static void tegra_pcie_power_off(void) +static int tegra_pcie_power_regate(void) { + int err; + + tegra_pcie_power_off(); + + tegra_pcie_xclk_clamp(true); + tegra_periph_reset_assert(tegra_pcie.pcie_xclk); tegra_periph_reset_assert(tegra_pcie.afi_clk); - tegra_periph_reset_assert(tegra_pcie.pex_clk); - tegra_pcie_xclk_clamp(true); + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, + tegra_pcie.pex_clk); + if (err) { + pr_err("PCIE: powerup sequence failed: %d\n", err); + 
return err; + } + + tegra_periph_reset_deassert(tegra_pcie.afi_clk); + + tegra_pcie_xclk_clamp(false); + + clk_enable(tegra_pcie.afi_clk); + clk_enable(tegra_pcie.pex_clk); + return clk_enable(tegra_pcie.pll_e); } static int tegra_pcie_clocks_get(void) @@ -759,7 +777,7 @@ static int __init tegra_pcie_get_resources(void) return err; } - err = tegra_pcie_power_on(); + err = tegra_pcie_power_regate(); if (err) { pr_err("PCIE: failed to power up: %d\n", err); goto err_pwr_on; diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-t2-tables.c index a6ea34e782dc..a475367befa3 100644 --- a/arch/arm/mach-tegra/pinmux-t2-tables.c +++ b/arch/arm/mach-tegra/pinmux-t2-tables.c @@ -29,6 +29,7 @@ #include <mach/iomap.h> #include <mach/pinmux.h> +#include <mach/suspend.h> #define DRIVE_PINGROUP(pg_name, r) \ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \ @@ -65,6 +66,16 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE DRIVE_PINGROUP(XM2D, 0x8cc), DRIVE_PINGROUP(XM2CLK, 0x8d0), DRIVE_PINGROUP(MEMCOMP, 0x8d4), + DRIVE_PINGROUP(SDIO1, 0x8e0), + DRIVE_PINGROUP(CRT, 0x8ec), + DRIVE_PINGROUP(DDC, 0x8f0), + DRIVE_PINGROUP(GMA, 0x8f4), + DRIVE_PINGROUP(GMB, 0x8f8), + DRIVE_PINGROUP(GMC, 0x8fc), + DRIVE_PINGROUP(GMD, 0x900), + DRIVE_PINGROUP(GME, 0x904), + DRIVE_PINGROUP(OWR, 0x908), + DRIVE_PINGROUP(UAD, 0x90c), }; #define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \ @@ -216,7 +227,8 @@ const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = { #define PULLUPDOWN_REG_NUM 5 static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM + - PULLUPDOWN_REG_NUM]; + PULLUPDOWN_REG_NUM + + ARRAY_SIZE(tegra_soc_drive_pingroups)]; static inline unsigned long pg_readl(unsigned long offset) { @@ -233,14 +245,17 @@ void tegra_pinmux_suspend(void) unsigned int i; u32 *ctx = pinmux_reg; - for (i = 0; i < TRISTATE_REG_NUM; i++) - *ctx++ = pg_readl(TRISTATE_REG_A + i*4); - for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++) *ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4); for (i = 0; i < PULLUPDOWN_REG_NUM; i++) *ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4); + + for (i = 0; i < TRISTATE_REG_NUM; i++) + *ctx++ = pg_readl(TRISTATE_REG_A + i*4); + + for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++) + *ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg); } void tegra_pinmux_resume(void) @@ -256,5 +271,8 @@ void tegra_pinmux_resume(void) for (i = 0; i < TRISTATE_REG_NUM; i++) pg_writel(*ctx++, TRISTATE_REG_A + i*4); + + for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++) + pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg); } #endif diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c new file mode 100644 index 000000000000..3cee9aa1f2c8 --- /dev/null +++ b/arch/arm/mach-tegra/powergate.c @@ -0,0 +1,212 @@ +/* + * drivers/powergate/tegra-powergate.c + * + * Copyright (c) 2010 Google, Inc + * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> + +#include <mach/clk.h> +#include <mach/iomap.h> +#include <mach/powergate.h> + +#define PWRGATE_TOGGLE 0x30 +#define PWRGATE_TOGGLE_START (1 << 8) + +#define REMOVE_CLAMPING 0x34 + +#define PWRGATE_STATUS 0x38 + +static DEFINE_SPINLOCK(tegra_powergate_lock); + +static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); + +static u32 pmc_read(unsigned long reg) +{ + return readl(pmc + reg); +} + +static void pmc_write(u32 val, unsigned long reg) +{ + writel(val, pmc + reg); +} + +static int tegra_powergate_set(int id, bool new_state) +{ + bool status; + unsigned long flags; + + spin_lock_irqsave(&tegra_powergate_lock, flags); + + status = pmc_read(PWRGATE_STATUS) & (1 << id); + + if (status == new_state) { + spin_unlock_irqrestore(&tegra_powergate_lock, flags); + return -EINVAL; + } + + pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); + + spin_unlock_irqrestore(&tegra_powergate_lock, flags); + + return 0; +} + +int tegra_powergate_power_on(int id) +{ + if (id < 0 || id >= TEGRA_NUM_POWERGATE) + return -EINVAL; + + return tegra_powergate_set(id, true); +} + +int tegra_powergate_power_off(int id) +{ + if (id < 0 || id >= TEGRA_NUM_POWERGATE) + return -EINVAL; + + return tegra_powergate_set(id, false); +} + +bool tegra_powergate_is_powered(int id) +{ + u32 status; + + if (id < 0 || id >= TEGRA_NUM_POWERGATE) + return -EINVAL; + + status = pmc_read(PWRGATE_STATUS) & (1 << id); + return !!status; +} + +int tegra_powergate_remove_clamping(int id) +{ + u32 mask; + + if (id < 0 || id >= TEGRA_NUM_POWERGATE) + return -EINVAL; + + /* + * Tegra 2 has a bug where PCIE and VDE clamping masks are + * swapped relatively to the partition ids + */ + if (id == TEGRA_POWERGATE_VDEC) + mask = (1 << TEGRA_POWERGATE_PCIE); + else if (id == TEGRA_POWERGATE_PCIE) + mask = (1 << TEGRA_POWERGATE_VDEC); + else + mask = (1 << id); + + pmc_write(mask, REMOVE_CLAMPING); + + return 0; +} + +/* Must be called with clk disabled, and returns with clk enabled */ +int tegra_powergate_sequence_power_up(int id, struct clk *clk) +{ + int ret; + + tegra_periph_reset_assert(clk); + + ret = tegra_powergate_power_on(id); + if (ret) + goto err_power; + + ret = clk_enable(clk); + if (ret) + goto err_clk; + + udelay(10); + + ret = tegra_powergate_remove_clamping(id); + if (ret) + goto err_clamp; + + udelay(10); + tegra_periph_reset_deassert(clk); + + return 0; + +err_clamp: + clk_disable(clk); +err_clk: + tegra_powergate_power_off(id); +err_power: + return ret; +} + +#ifdef CONFIG_DEBUG_FS + +static const char * const powergate_name[] = { + [TEGRA_POWERGATE_CPU] = "cpu", + [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_VENC] = "venc", + [TEGRA_POWERGATE_VDEC] = "vdec", + [TEGRA_POWERGATE_PCIE] = "pcie", + [TEGRA_POWERGATE_L2] = "l2", + [TEGRA_POWERGATE_MPE] = "mpe", +}; + +static int powergate_show(struct seq_file *s, void *data) +{ + int i; + + seq_printf(s, " powergate powered\n"); + seq_printf(s, "------------------\n"); + + for (i = 0; i < TEGRA_NUM_POWERGATE; i++) + seq_printf(s, " %9s %7s\n", powergate_name[i], + tegra_powergate_is_powered(i) ? 
"yes" : "no"); + return 0; +} + +static int powergate_open(struct inode *inode, struct file *file) +{ + return single_open(file, powergate_show, inode->i_private); +} + +static const struct file_operations powergate_fops = { + .open = powergate_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init powergate_debugfs_init(void) +{ + struct dentry *d; + int err = -ENOMEM; + + d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL, + &powergate_fops); + if (!d) + return -ENOMEM; + + return err; +} + +late_initcall(powergate_debugfs_init); + +#endif diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c index f0dae6d8ba52..6d7c4eea4dcb 100644 --- a/arch/arm/mach-tegra/tegra2_clocks.c +++ b/arch/arm/mach-tegra/tegra2_clocks.c @@ -23,14 +23,15 @@ #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/io.h> -#include <linux/hrtimer.h> #include <linux/clkdev.h> +#include <linux/clk.h> #include <mach/iomap.h> +#include <mach/suspend.h> #include "clock.h" #include "fuse.h" -#include "tegra2_dvfs.h" +#include "tegra2_emc.h" #define RST_DEVICES 0x004 #define RST_DEVICES_SET 0x300 @@ -51,7 +52,7 @@ #define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30) #define OSC_CTRL_OSC_FREQ_12MHZ (2<<30) #define OSC_CTRL_OSC_FREQ_26MHZ (3<<30) -#define OSC_CTRL_MASK 0x3f2 +#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK) #define OSC_FREQ_DET 0x58 #define OSC_FREQ_DET_TRIG (1<<31) @@ -73,12 +74,15 @@ #define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF #define PERIPH_CLK_SOURCE_DIV_SHIFT 0 +#define SDMMC_CLK_INT_FB_SEL (1 << 23) +#define SDMMC_CLK_INT_FB_DLY_SHIFT 16 +#define SDMMC_CLK_INT_FB_DLY_MASK (0xF << SDMMC_CLK_INT_FB_DLY_SHIFT) + #define PLL_BASE 0x0 #define PLL_BASE_BYPASS (1<<31) #define PLL_BASE_ENABLE (1<<30) #define PLL_BASE_REF_ENABLE (1<<29) #define PLL_BASE_OVERRIDE (1<<28) -#define PLL_BASE_LOCK (1<<27) #define PLL_BASE_DIVP_MASK (0x7<<20) #define PLL_BASE_DIVP_SHIFT 20 #define PLL_BASE_DIVN_MASK (0x3FF<<8) @@ -93,7 +97,6 @@ #define PLL_OUT_RESET_DISABLE (1<<0) #define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc) -#define PLL_MISC_LOCK_ENABLE(c) (((c)->flags & PLLU) ? (1<<22) : (1<<18)) #define PLL_MISC_DCCON_SHIFT 20 #define PLL_MISC_CPCON_SHIFT 8 @@ -111,9 +114,9 @@ #define PLLE_MISC_READY (1 << 15) -#define PERIPH_CLK_TO_ENB_REG(c) ((c->clk_num / 32) * 4) -#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->clk_num / 32) * 8) -#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->clk_num % 32)) +#define PERIPH_CLK_TO_ENB_REG(c) ((c->u.periph.clk_num / 32) * 4) +#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->u.periph.clk_num / 32) * 8) +#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->u.periph.clk_num % 32)) #define SUPER_CLK_MUX 0x00 #define SUPER_STATE_SHIFT 28 @@ -134,12 +137,42 @@ #define BUS_CLK_DISABLE (1<<3) #define BUS_CLK_DIV_MASK 0x3 +#define PMC_CTRL 0x0 + #define PMC_CTRL_BLINK_ENB (1 << 7) + +#define PMC_DPD_PADS_ORIDE 0x1c + #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20) + +#define PMC_BLINK_TIMER_DATA_ON_SHIFT 0 +#define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff +#define PMC_BLINK_TIMER_ENB (1 << 15) +#define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16 +#define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff + static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE); +static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); + +/* + * Some clocks share a register with other clocks. Any clock op that + * non-atomically modifies a register used by another clock must lock + * clock_register_lock first. 
+ */ +static DEFINE_SPINLOCK(clock_register_lock); + +/* + * Some peripheral clocks share an enable bit, so refcount the enable bits + * in registers CLK_ENABLE_L, CLK_ENABLE_H, and CLK_ENABLE_U + */ +static int tegra_periph_clk_enable_refcount[3 * 32]; #define clk_writel(value, reg) \ __raw_writel(value, (u32)reg_clk_base + (reg)) #define clk_readl(reg) \ __raw_readl((u32)reg_clk_base + (reg)) +#define pmc_writel(value, reg) \ + __raw_writel(value, (u32)reg_pmc_base + (reg)) +#define pmc_readl(reg) \ + __raw_readl((u32)reg_pmc_base + (reg)) unsigned long clk_measure_input_freq(void) { @@ -245,6 +278,18 @@ static struct clk_ops tegra_clk_m_ops = { .disable = tegra2_clk_m_disable, }; +void tegra2_periph_reset_assert(struct clk *c) +{ + BUG_ON(!c->ops->reset); + c->ops->reset(c, true); +} + +void tegra2_periph_reset_deassert(struct clk *c) +{ + BUG_ON(!c->ops->reset); + c->ops->reset(c, false); +} + /* super clock functions */ /* "super clocks" on tegra have two-stage muxes and a clock skipping * super divider. We will ignore the clock skipping divider, since we @@ -303,12 +348,12 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p) val |= sel->value << shift; if (c->refcnt) - clk_enable_locked(p); + clk_enable(p); clk_writel(val, c->reg); if (c->refcnt && c->parent) - clk_disable_locked(c->parent); + clk_disable(c->parent); clk_reparent(c, p); return 0; @@ -317,11 +362,24 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p) return -EINVAL; } +/* + * Super clocks have "clock skippers" instead of dividers. Dividing using + * a clock skipper does not allow the voltage to be scaled down, so instead + * adjust the rate of the parent clock. This requires that the parent of a + * super clock have no other children, otherwise the rate will change + * underneath the other children. + */ +static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate) +{ + return clk_set_rate(c->parent, rate); +} + static struct clk_ops tegra_super_ops = { .init = tegra2_super_clk_init, .enable = tegra2_super_clk_enable, .disable = tegra2_super_clk_disable, .set_parent = tegra2_super_clk_set_parent, + .set_rate = tegra2_super_clk_set_rate, }; /* virtual cpu clock functions */ @@ -351,25 +409,36 @@ static void tegra2_cpu_clk_disable(struct clk *c) static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate) { int ret; - ret = clk_set_parent_locked(c->parent, c->backup); + /* + * Take an extra reference to the main pll so it doesn't turn + * off when we move the cpu off of it + */ + clk_enable(c->u.cpu.main); + + ret = clk_set_parent(c->parent, c->u.cpu.backup); if (ret) { - pr_err("Failed to switch cpu to clock %s\n", c->backup->name); - return ret; + pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name); + goto out; } - ret = clk_set_rate_locked(c->main, rate); + if (rate == clk_get_rate(c->u.cpu.backup)) + goto out; + + ret = clk_set_rate(c->u.cpu.main, rate); if (ret) { pr_err("Failed to change cpu pll to %lu\n", rate); - return ret; + goto out; } - ret = clk_set_parent_locked(c->parent, c->main); + ret = clk_set_parent(c->parent, c->u.cpu.main); if (ret) { - pr_err("Failed to switch cpu to clock %s\n", c->main->name); - return ret; + pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name); + goto out; } - return 0; +out: + clk_disable(c->u.cpu.main); + return ret; } static struct clk_ops tegra_cpu_ops = { @@ -379,6 +448,20 @@ static struct clk_ops tegra_cpu_ops = { .set_rate = tegra2_cpu_clk_set_rate, }; +/* virtual cop clock functions. 
Used to acquire the fake 'cop' clock to + * reset the COP block (i.e. AVP) */ +static void tegra2_cop_clk_reset(struct clk *c, bool assert) +{ + unsigned long reg = assert ? RST_DEVICES_SET : RST_DEVICES_CLR; + + pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert"); + clk_writel(1 << 1, reg); +} + +static struct clk_ops tegra_cop_ops = { + .reset = tegra2_cop_clk_reset, +}; + /* bus clock functions */ static void tegra2_bus_clk_init(struct clk *c) { @@ -390,24 +473,45 @@ static void tegra2_bus_clk_init(struct clk *c) static int tegra2_bus_clk_enable(struct clk *c) { - u32 val = clk_readl(c->reg); + u32 val; + unsigned long flags; + + spin_lock_irqsave(&clock_register_lock, flags); + + val = clk_readl(c->reg); val &= ~(BUS_CLK_DISABLE << c->reg_shift); clk_writel(val, c->reg); + + spin_unlock_irqrestore(&clock_register_lock, flags); + return 0; } static void tegra2_bus_clk_disable(struct clk *c) { - u32 val = clk_readl(c->reg); + u32 val; + unsigned long flags; + + spin_lock_irqsave(&clock_register_lock, flags); + + val = clk_readl(c->reg); val |= BUS_CLK_DISABLE << c->reg_shift; clk_writel(val, c->reg); + + spin_unlock_irqrestore(&clock_register_lock, flags); } static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate) { - u32 val = clk_readl(c->reg); - unsigned long parent_rate = c->parent->rate; + u32 val; + unsigned long parent_rate = clk_get_rate(c->parent); + unsigned long flags; + int ret = -EINVAL; int i; + + spin_lock_irqsave(&clock_register_lock, flags); + + val = clk_readl(c->reg); for (i = 1; i <= 4; i++) { if (rate == parent_rate / i) { val &= ~(BUS_CLK_DIV_MASK << c->reg_shift); @@ -415,10 +519,14 @@ static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate) clk_writel(val, c->reg); c->div = i; c->mul = 1; - return 0; + ret = 0; + break; } } - return -EINVAL; + + spin_unlock_irqrestore(&clock_register_lock, flags); + + return ret; } static struct clk_ops tegra_bus_ops = { @@ -428,24 +536,96 @@ static struct clk_ops tegra_bus_ops = { .set_rate = tegra2_bus_clk_set_rate, }; -/* PLL Functions */ -static int tegra2_pll_clk_wait_for_lock(struct clk *c) +/* Blink output functions */ + +static void tegra2_blink_clk_init(struct clk *c) { - ktime_t before; + u32 val; - before = ktime_get(); + val = pmc_readl(PMC_CTRL); + c->state = (val & PMC_CTRL_BLINK_ENB) ? 
ON : OFF; + c->mul = 1; + val = pmc_readl(c->reg); + + if (val & PMC_BLINK_TIMER_ENB) { + unsigned int on_off; + + on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) & + PMC_BLINK_TIMER_DATA_ON_MASK; + val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT; + val &= PMC_BLINK_TIMER_DATA_OFF_MASK; + on_off += val; + /* each tick in the blink timer is 4 32KHz clocks */ + c->div = on_off * 4; + } else { + c->div = 1; + } +} - while (!(clk_readl(c->reg + PLL_BASE) & PLL_BASE_LOCK)) { - if (ktime_us_delta(ktime_get(), before) > 5000) { - pr_err("Timed out waiting for lock bit on pll %s", - c->name); - return -1; - } +static int tegra2_blink_clk_enable(struct clk *c) +{ + u32 val; + + val = pmc_readl(PMC_DPD_PADS_ORIDE); + pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); + + val = pmc_readl(PMC_CTRL); + pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL); + + return 0; +} + +static void tegra2_blink_clk_disable(struct clk *c) +{ + u32 val; + + val = pmc_readl(PMC_CTRL); + pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL); + + val = pmc_readl(PMC_DPD_PADS_ORIDE); + pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); +} + +static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate) +{ + unsigned long parent_rate = clk_get_rate(c->parent); + if (rate >= parent_rate) { + c->div = 1; + pmc_writel(0, c->reg); + } else { + unsigned int on_off; + u32 val; + + on_off = DIV_ROUND_UP(parent_rate / 8, rate); + c->div = on_off * 8; + + val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) << + PMC_BLINK_TIMER_DATA_ON_SHIFT; + on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK; + on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT; + val |= on_off; + val |= PMC_BLINK_TIMER_ENB; + pmc_writel(val, c->reg); } return 0; } +static struct clk_ops tegra_blink_clk_ops = { + .init = &tegra2_blink_clk_init, + .enable = &tegra2_blink_clk_enable, + .disable = &tegra2_blink_clk_disable, + .set_rate = &tegra2_blink_clk_set_rate, +}; + +/* PLL Functions */ +static int tegra2_pll_clk_wait_for_lock(struct clk *c) +{ + udelay(c->u.pll.lock_delay); + + return 0; +} + static void tegra2_pll_clk_init(struct clk *c) { u32 val = clk_readl(c->reg + PLL_BASE); @@ -479,10 +659,6 @@ static int tegra2_pll_clk_enable(struct clk *c) val |= PLL_BASE_ENABLE; clk_writel(val, c->reg + PLL_BASE); - val = clk_readl(c->reg + PLL_MISC(c)); - val |= PLL_MISC_LOCK_ENABLE(c); - clk_writel(val, c->reg + PLL_MISC(c)); - tegra2_pll_clk_wait_for_lock(c); return 0; @@ -502,13 +678,12 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate) { u32 val; unsigned long input_rate; - const struct clk_pll_table *sel; + const struct clk_pll_freq_table *sel; pr_debug("%s: %s %lu\n", __func__, c->name, rate); - BUG_ON(c->refcnt != 0); - input_rate = c->parent->rate; - for (sel = c->pll_table; sel->input_rate != 0; sel++) { + input_rate = clk_get_rate(c->parent); + for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) { if (sel->input_rate == input_rate && sel->output_rate == rate) { c->mul = sel->n; c->div = sel->m * sel->p; @@ -620,9 +795,11 @@ static int tegra2_pll_div_clk_enable(struct clk *c) { u32 val; u32 new_val; + unsigned long flags; pr_debug("%s: %s\n", __func__, c->name); if (c->flags & DIV_U71) { + spin_lock_irqsave(&clock_register_lock, flags); val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; @@ -632,12 +809,15 @@ static int tegra2_pll_div_clk_enable(struct clk *c) val &= ~(0xFFFF << c->reg_shift); val |= new_val << c->reg_shift; clk_writel(val, c->reg); + spin_unlock_irqrestore(&clock_register_lock, flags); 
return 0; } else if (c->flags & DIV_2) { BUG_ON(!(c->flags & PLLD)); + spin_lock_irqsave(&clock_register_lock, flags); val = clk_readl(c->reg); val &= ~PLLD_MISC_DIV_RST; clk_writel(val, c->reg); + spin_unlock_irqrestore(&clock_register_lock, flags); return 0; } return -EINVAL; @@ -647,9 +827,11 @@ static void tegra2_pll_div_clk_disable(struct clk *c) { u32 val; u32 new_val; + unsigned long flags; pr_debug("%s: %s\n", __func__, c->name); if (c->flags & DIV_U71) { + spin_lock_irqsave(&clock_register_lock, flags); val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; @@ -659,11 +841,14 @@ static void tegra2_pll_div_clk_disable(struct clk *c) val &= ~(0xFFFF << c->reg_shift); val |= new_val << c->reg_shift; clk_writel(val, c->reg); + spin_unlock_irqrestore(&clock_register_lock, flags); } else if (c->flags & DIV_2) { BUG_ON(!(c->flags & PLLD)); + spin_lock_irqsave(&clock_register_lock, flags); val = clk_readl(c->reg); val |= PLLD_MISC_DIV_RST; clk_writel(val, c->reg); + spin_unlock_irqrestore(&clock_register_lock, flags); } } @@ -672,10 +857,14 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) u32 val; u32 new_val; int divider_u71; + unsigned long parent_rate = clk_get_rate(c->parent); + unsigned long flags; + pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { - divider_u71 = clk_div71_get_divider(c->parent->rate, rate); + divider_u71 = clk_div71_get_divider(parent_rate, rate); if (divider_u71 >= 0) { + spin_lock_irqsave(&clock_register_lock, flags); val = clk_readl(c->reg); new_val = val >> c->reg_shift; new_val &= 0xFFFF; @@ -689,10 +878,11 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) clk_writel(val, c->reg); c->div = divider_u71 + 2; c->mul = 2; + spin_unlock_irqrestore(&clock_register_lock, flags); return 0; } } else if (c->flags & DIV_2) { - if (c->parent->rate == rate * 2) + if (parent_rate == rate * 2) return 0; } return -EINVAL; @@ -701,15 +891,16 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate) { int divider; + unsigned long parent_rate = clk_get_rate(c->parent); pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { - divider = clk_div71_get_divider(c->parent->rate, rate); + divider = clk_div71_get_divider(parent_rate, rate); if (divider < 0) return divider; - return c->parent->rate * 2 / (divider + 2); + return DIV_ROUND_UP(parent_rate * 2, divider + 2); } else if (c->flags & DIV_2) { - return c->parent->rate / 2; + return DIV_ROUND_UP(parent_rate, 2); } return -EINVAL; } @@ -755,9 +946,14 @@ static void tegra2_periph_clk_init(struct clk *c) } c->state = ON; + + if (!c->u.periph.clk_num) + return; + if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_ENB_BIT(c))) c->state = OFF; + if (!(c->flags & PERIPH_NO_RESET)) if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_ENB_BIT(c)) @@ -767,8 +963,20 @@ static void tegra2_periph_clk_init(struct clk *c) static int tegra2_periph_clk_enable(struct clk *c) { u32 val; + unsigned long flags; + int refcount; pr_debug("%s on clock %s\n", __func__, c->name); + if (!c->u.periph.clk_num) + return 0; + + spin_lock_irqsave(&clock_register_lock, flags); + + refcount = tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++; + + if (refcount > 1) + goto out; + clk_writel(PERIPH_CLK_TO_ENB_BIT(c), CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); if (!(c->flags & PERIPH_NO_RESET) && 
!(c->flags & PERIPH_MANUAL_RESET)) @@ -781,34 +989,48 @@ static int tegra2_periph_clk_enable(struct clk *c) val |= 0x3 << 24; clk_writel(val, c->reg); } + +out: + spin_unlock_irqrestore(&clock_register_lock, flags); + return 0; } static void tegra2_periph_clk_disable(struct clk *c) { + unsigned long flags; + pr_debug("%s on clock %s\n", __func__, c->name); - clk_writel(PERIPH_CLK_TO_ENB_BIT(c), - CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); -} + if (!c->u.periph.clk_num) + return; -void tegra2_periph_reset_deassert(struct clk *c) -{ - pr_debug("%s on clock %s\n", __func__, c->name); - if (!(c->flags & PERIPH_NO_RESET)) + spin_lock_irqsave(&clock_register_lock, flags); + + if (c->refcnt) + tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--; + + if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) clk_writel(PERIPH_CLK_TO_ENB_BIT(c), - RST_DEVICES_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); + CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); + + spin_unlock_irqrestore(&clock_register_lock, flags); } -void tegra2_periph_reset_assert(struct clk *c) +static void tegra2_periph_clk_reset(struct clk *c, bool assert) { - pr_debug("%s on clock %s\n", __func__, c->name); + unsigned long base = assert ? RST_DEVICES_SET : RST_DEVICES_CLR; + + pr_debug("%s %s on clock %s\n", __func__, + assert ? "assert" : "deassert", c->name); + + BUG_ON(!c->u.periph.clk_num); + if (!(c->flags & PERIPH_NO_RESET)) clk_writel(PERIPH_CLK_TO_ENB_BIT(c), - RST_DEVICES_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); + base + PERIPH_CLK_TO_ENB_SET_REG(c)); } - static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p) { u32 val; @@ -821,12 +1043,12 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p) val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT; if (c->refcnt) - clk_enable_locked(p); + clk_enable(p); clk_writel(val, c->reg); if (c->refcnt && c->parent) - clk_disable_locked(c->parent); + clk_disable(c->parent); clk_reparent(c, p); return 0; @@ -840,9 +1062,10 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) { u32 val; int divider; - pr_debug("%s: %lu\n", __func__, rate); + unsigned long parent_rate = clk_get_rate(c->parent); + if (c->flags & DIV_U71) { - divider = clk_div71_get_divider(c->parent->rate, rate); + divider = clk_div71_get_divider(parent_rate, rate); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK; @@ -853,7 +1076,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) return 0; } } else if (c->flags & DIV_U16) { - divider = clk_div16_get_divider(c->parent->rate, rate); + divider = clk_div16_get_divider(parent_rate, rate); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK; @@ -863,7 +1086,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) c->mul = 1; return 0; } - } else if (c->parent->rate <= rate) { + } else if (parent_rate <= rate) { c->div = 1; c->mul = 1; return 0; @@ -875,19 +1098,20 @@ static long tegra2_periph_clk_round_rate(struct clk *c, unsigned long rate) { int divider; + unsigned long parent_rate = clk_get_rate(c->parent); pr_debug("%s: %s %lu\n", __func__, c->name, rate); if (c->flags & DIV_U71) { - divider = clk_div71_get_divider(c->parent->rate, rate); + divider = clk_div71_get_divider(parent_rate, rate); if (divider < 0) return divider; - return c->parent->rate * 2 / (divider + 2); + return DIV_ROUND_UP(parent_rate * 2, divider + 2); } else if (c->flags & DIV_U16) { - divider = clk_div16_get_divider(c->parent->rate, rate); 
+ divider = clk_div16_get_divider(parent_rate, rate); if (divider < 0) return divider; - return c->parent->rate / (divider + 1); + return DIV_ROUND_UP(parent_rate, divider + 1); } return -EINVAL; } @@ -899,6 +1123,71 @@ static struct clk_ops tegra_periph_clk_ops = { .set_parent = &tegra2_periph_clk_set_parent, .set_rate = &tegra2_periph_clk_set_rate, .round_rate = &tegra2_periph_clk_round_rate, + .reset = &tegra2_periph_clk_reset, +}; + +/* The SDMMC controllers have extra bits in the clock source register that + * adjust the delay between the clock and data to compenstate for delays + * on the PCB. */ +void tegra2_sdmmc_tap_delay(struct clk *c, int delay) +{ + u32 reg; + + delay = clamp(delay, 0, 15); + reg = clk_readl(c->reg); + reg &= ~SDMMC_CLK_INT_FB_DLY_MASK; + reg |= SDMMC_CLK_INT_FB_SEL; + reg |= delay << SDMMC_CLK_INT_FB_DLY_SHIFT; + clk_writel(reg, c->reg); +} + +/* External memory controller clock ops */ +static void tegra2_emc_clk_init(struct clk *c) +{ + tegra2_periph_clk_init(c); + c->max_rate = clk_get_rate_locked(c); +} + +static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate) +{ + long new_rate = rate; + + new_rate = tegra_emc_round_rate(new_rate); + if (new_rate < 0) + return c->max_rate; + + BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate)); + + return new_rate; +} + +static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate) +{ + int ret; + /* + * The Tegra2 memory controller has an interlock with the clock + * block that allows memory shadowed registers to be updated, + * and then transfer them to the main registers at the same + * time as the clock update without glitches. + */ + ret = tegra_emc_set_rate(rate); + if (ret < 0) + return ret; + + ret = tegra2_periph_clk_set_rate(c, rate); + udelay(1); + + return ret; +} + +static struct clk_ops tegra_emc_clk_ops = { + .init = &tegra2_emc_clk_init, + .enable = &tegra2_periph_clk_enable, + .disable = &tegra2_periph_clk_disable, + .set_parent = &tegra2_periph_clk_set_parent, + .set_rate = &tegra2_emc_clk_set_rate, + .round_rate = &tegra2_emc_clk_round_rate, + .reset = &tegra2_periph_clk_reset, }; /* Clock doubler ops */ @@ -907,6 +1196,10 @@ static void tegra2_clk_double_init(struct clk *c) c->mul = 2; c->div = 1; c->state = ON; + + if (!c->u.periph.clk_num) + return; + if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_ENB_BIT(c))) c->state = OFF; @@ -914,7 +1207,7 @@ static void tegra2_clk_double_init(struct clk *c) static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate) { - if (rate != 2 * c->parent->rate) + if (rate != 2 * clk_get_rate(c->parent)) return -EINVAL; c->mul = 2; c->div = 1; @@ -928,6 +1221,7 @@ static struct clk_ops tegra_clk_double_ops = { .set_rate = &tegra2_clk_double_set_rate, }; +/* Audio sync clock ops */ static void tegra2_audio_sync_clk_init(struct clk *c) { int source; @@ -964,12 +1258,12 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p) val |= sel->value; if (c->refcnt) - clk_enable_locked(p); + clk_enable(p); clk_writel(val, c->reg); if (c->refcnt && c->parent) - clk_disable_locked(c->parent); + clk_disable(c->parent); clk_reparent(c, p); return 0; @@ -979,33 +1273,153 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p) return -EINVAL; } -static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate) -{ - unsigned long parent_rate; - if (!c->parent) { - pr_err("%s: clock has no parent\n", __func__); - return -EINVAL; - } - parent_rate = c->parent->rate; 
- if (rate != parent_rate) { - pr_err("%s: %s/%ld differs from parent %s/%ld\n", - __func__, - c->name, rate, - c->parent->name, parent_rate); - return -EINVAL; - } - c->rate = parent_rate; - return 0; -} - static struct clk_ops tegra_audio_sync_clk_ops = { .init = tegra2_audio_sync_clk_init, .enable = tegra2_audio_sync_clk_enable, .disable = tegra2_audio_sync_clk_disable, - .set_rate = tegra2_audio_sync_clk_set_rate, .set_parent = tegra2_audio_sync_clk_set_parent, }; +/* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */ + +static void tegra2_cdev_clk_init(struct clk *c) +{ + /* We could un-tristate the cdev1 or cdev2 pingroup here; this is + * currently done in the pinmux code. */ + c->state = ON; + + BUG_ON(!c->u.periph.clk_num); + + if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & + PERIPH_CLK_TO_ENB_BIT(c))) + c->state = OFF; +} + +static int tegra2_cdev_clk_enable(struct clk *c) +{ + BUG_ON(!c->u.periph.clk_num); + + clk_writel(PERIPH_CLK_TO_ENB_BIT(c), + CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); + return 0; +} + +static void tegra2_cdev_clk_disable(struct clk *c) +{ + BUG_ON(!c->u.periph.clk_num); + + clk_writel(PERIPH_CLK_TO_ENB_BIT(c), + CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); +} + +static struct clk_ops tegra_cdev_clk_ops = { + .init = &tegra2_cdev_clk_init, + .enable = &tegra2_cdev_clk_enable, + .disable = &tegra2_cdev_clk_disable, +}; + +/* shared bus ops */ +/* + * Some clocks may have multiple downstream users that need to request a + * higher clock rate. Shared bus clocks provide a unique shared_bus_user + * clock to each user. The frequency of the bus is set to the highest + * enabled shared_bus_user clock, with a minimum value set by the + * shared bus. + */ +static int tegra_clk_shared_bus_update(struct clk *bus) +{ + struct clk *c; + unsigned long rate = bus->min_rate; + + list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node) + if (c->u.shared_bus_user.enabled) + rate = max(c->u.shared_bus_user.rate, rate); + + if (rate == clk_get_rate_locked(bus)) + return 0; + + return clk_set_rate_locked(bus, rate); +}; + +static void tegra_clk_shared_bus_init(struct clk *c) +{ + unsigned long flags; + + c->max_rate = c->parent->max_rate; + c->u.shared_bus_user.rate = c->parent->max_rate; + c->state = OFF; + c->set = true; + + spin_lock_irqsave(&c->parent->spinlock, flags); + + list_add_tail(&c->u.shared_bus_user.node, + &c->parent->shared_bus_list); + + spin_unlock_irqrestore(&c->parent->spinlock, flags); +} + +static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) +{ + unsigned long flags; + int ret; + + rate = clk_round_rate(c->parent, rate); + if (rate < 0) + return rate; + + spin_lock_irqsave(&c->parent->spinlock, flags); + + c->u.shared_bus_user.rate = rate; + ret = tegra_clk_shared_bus_update(c->parent); + + spin_unlock_irqrestore(&c->parent->spinlock, flags); + + return ret; +} + +static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate) +{ + return clk_round_rate(c->parent, rate); +} + +static int tegra_clk_shared_bus_enable(struct clk *c) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&c->parent->spinlock, flags); + + c->u.shared_bus_user.enabled = true; + ret = tegra_clk_shared_bus_update(c->parent); + + spin_unlock_irqrestore(&c->parent->spinlock, flags); + + return ret; +} + +static void tegra_clk_shared_bus_disable(struct clk *c) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&c->parent->spinlock, flags); + + c->u.shared_bus_user.enabled = false; + ret = 
tegra_clk_shared_bus_update(c->parent); + WARN_ON_ONCE(ret); + + spin_unlock_irqrestore(&c->parent->spinlock, flags); +} + +static struct clk_ops tegra_clk_shared_bus_ops = { + .init = tegra_clk_shared_bus_init, + .enable = tegra_clk_shared_bus_enable, + .disable = tegra_clk_shared_bus_disable, + .set_rate = tegra_clk_shared_bus_set_rate, + .round_rate = tegra_clk_shared_bus_round_rate, +}; + + /* Clock definitions */ static struct clk tegra_clk_32k = { .name = "clk_32k", @@ -1014,7 +1428,7 @@ static struct clk tegra_clk_32k = { .max_rate = 32768, }; -static struct clk_pll_table tegra_pll_s_table[] = { +static struct clk_pll_freq_table tegra_pll_s_freq_table[] = { {32768, 12000000, 366, 1, 1, 0}, {32768, 13000000, 397, 1, 1, 0}, {32768, 19200000, 586, 1, 1, 0}, @@ -1026,16 +1440,19 @@ static struct clk tegra_pll_s = { .name = "pll_s", .flags = PLL_ALT_MISC_REG, .ops = &tegra_pll_ops, - .reg = 0xf0, - .input_min = 32768, - .input_max = 32768, .parent = &tegra_clk_32k, - .cf_min = 0, /* FIXME */ - .cf_max = 0, /* FIXME */ - .vco_min = 12000000, - .vco_max = 26000000, - .pll_table = tegra_pll_s_table, .max_rate = 26000000, + .reg = 0xf0, + .u.pll = { + .input_min = 32768, + .input_max = 32768, + .cf_min = 0, /* FIXME */ + .cf_max = 0, /* FIXME */ + .vco_min = 12000000, + .vco_max = 26000000, + .freq_table = tegra_pll_s_freq_table, + .lock_delay = 300, + }, }; static struct clk_mux_sel tegra_clk_m_sel[] = { @@ -1043,18 +1460,18 @@ static struct clk_mux_sel tegra_clk_m_sel[] = { { .input = &tegra_pll_s, .value = 1}, { 0, 0}, }; + static struct clk tegra_clk_m = { .name = "clk_m", .flags = ENABLE_ON_INIT, .ops = &tegra_clk_m_ops, .inputs = tegra_clk_m_sel, .reg = 0x1fc, - .reg_mask = (1<<28), .reg_shift = 28, .max_rate = 26000000, }; -static struct clk_pll_table tegra_pll_c_table[] = { +static struct clk_pll_freq_table tegra_pll_c_freq_table[] = { { 0, 0, 0, 0, 0, 0 }, }; @@ -1063,15 +1480,18 @@ static struct clk tegra_pll_c = { .flags = PLL_HAS_CPCON, .ops = &tegra_pll_ops, .reg = 0x80, - .input_min = 2000000, - .input_max = 31000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 20000000, - .vco_max = 1400000000, - .pll_table = tegra_pll_c_table, .max_rate = 600000000, + .u.pll = { + .input_min = 2000000, + .input_max = 31000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 20000000, + .vco_max = 1400000000, + .freq_table = tegra_pll_c_freq_table, + .lock_delay = 300, + }, }; static struct clk tegra_pll_c_out1 = { @@ -1084,7 +1504,7 @@ static struct clk tegra_pll_c_out1 = { .max_rate = 600000000, }; -static struct clk_pll_table tegra_pll_m_table[] = { +static struct clk_pll_freq_table tegra_pll_m_freq_table[] = { { 12000000, 666000000, 666, 12, 1, 8}, { 13000000, 666000000, 666, 13, 1, 8}, { 19200000, 666000000, 555, 16, 1, 8}, @@ -1101,15 +1521,18 @@ static struct clk tegra_pll_m = { .flags = PLL_HAS_CPCON, .ops = &tegra_pll_ops, .reg = 0x90, - .input_min = 2000000, - .input_max = 31000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 20000000, - .vco_max = 1200000000, - .pll_table = tegra_pll_m_table, .max_rate = 800000000, + .u.pll = { + .input_min = 2000000, + .input_max = 31000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 20000000, + .vco_max = 1200000000, + .freq_table = tegra_pll_m_freq_table, + .lock_delay = 300, + }, }; static struct clk tegra_pll_m_out1 = { @@ -1122,7 +1545,7 @@ static struct clk tegra_pll_m_out1 = { .max_rate = 600000000, }; -static struct clk_pll_table tegra_pll_p_table[] = { 
+static struct clk_pll_freq_table tegra_pll_p_freq_table[] = { { 12000000, 216000000, 432, 12, 2, 8}, { 13000000, 216000000, 432, 13, 2, 8}, { 19200000, 216000000, 90, 4, 2, 1}, @@ -1139,15 +1562,18 @@ static struct clk tegra_pll_p = { .flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON, .ops = &tegra_pll_ops, .reg = 0xa0, - .input_min = 2000000, - .input_max = 31000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 20000000, - .vco_max = 1400000000, - .pll_table = tegra_pll_p_table, .max_rate = 432000000, + .u.pll = { + .input_min = 2000000, + .input_max = 31000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 20000000, + .vco_max = 1400000000, + .freq_table = tegra_pll_p_freq_table, + .lock_delay = 300, + }, }; static struct clk tegra_pll_p_out1 = { @@ -1190,11 +1616,9 @@ static struct clk tegra_pll_p_out4 = { .max_rate = 432000000, }; -static struct clk_pll_table tegra_pll_a_table[] = { +static struct clk_pll_freq_table tegra_pll_a_freq_table[] = { { 28800000, 56448000, 49, 25, 1, 1}, { 28800000, 73728000, 64, 25, 1, 1}, - { 28800000, 11289600, 49, 25, 1, 1}, - { 28800000, 12288000, 64, 25, 1, 1}, { 28800000, 24000000, 5, 6, 1, 1}, { 0, 0, 0, 0, 0, 0 }, }; @@ -1204,15 +1628,18 @@ static struct clk tegra_pll_a = { .flags = PLL_HAS_CPCON, .ops = &tegra_pll_ops, .reg = 0xb0, - .input_min = 2000000, - .input_max = 31000000, .parent = &tegra_pll_p_out1, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 20000000, - .vco_max = 1400000000, - .pll_table = tegra_pll_a_table, - .max_rate = 56448000, + .max_rate = 73728000, + .u.pll = { + .input_min = 2000000, + .input_max = 31000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 20000000, + .vco_max = 1400000000, + .freq_table = tegra_pll_a_freq_table, + .lock_delay = 300, + }, }; static struct clk tegra_pll_a_out0 = { @@ -1222,14 +1649,25 @@ static struct clk tegra_pll_a_out0 = { .parent = &tegra_pll_a, .reg = 0xb4, .reg_shift = 0, - .max_rate = 56448000, + .max_rate = 73728000, }; -static struct clk_pll_table tegra_pll_d_table[] = { +static struct clk_pll_freq_table tegra_pll_d_freq_table[] = { + { 12000000, 216000000, 216, 12, 1, 4}, + { 13000000, 216000000, 216, 13, 1, 4}, + { 19200000, 216000000, 135, 12, 1, 3}, + { 26000000, 216000000, 216, 26, 1, 4}, + + { 12000000, 594000000, 594, 12, 1, 8}, + { 13000000, 594000000, 594, 13, 1, 8}, + { 19200000, 594000000, 495, 16, 1, 8}, + { 26000000, 594000000, 594, 26, 1, 8}, + { 12000000, 1000000000, 1000, 12, 1, 12}, { 13000000, 1000000000, 1000, 13, 1, 12}, { 19200000, 1000000000, 625, 12, 1, 8}, { 26000000, 1000000000, 1000, 26, 1, 12}, + { 0, 0, 0, 0, 0, 0 }, }; @@ -1238,15 +1676,18 @@ static struct clk tegra_pll_d = { .flags = PLL_HAS_CPCON | PLLD, .ops = &tegra_pll_ops, .reg = 0xd0, - .input_min = 2000000, - .input_max = 40000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 40000000, - .vco_max = 1000000000, - .pll_table = tegra_pll_d_table, .max_rate = 1000000000, + .u.pll = { + .input_min = 2000000, + .input_max = 40000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 40000000, + .vco_max = 1000000000, + .freq_table = tegra_pll_d_freq_table, + .lock_delay = 1000, + }, }; static struct clk tegra_pll_d_out0 = { @@ -1257,7 +1698,7 @@ static struct clk tegra_pll_d_out0 = { .max_rate = 500000000, }; -static struct clk_pll_table tegra_pll_u_table[] = { +static struct clk_pll_freq_table tegra_pll_u_freq_table[] = { { 12000000, 480000000, 960, 12, 2, 0}, { 13000000, 480000000, 960, 13, 2, 0}, { 
19200000, 480000000, 200, 4, 2, 0}, @@ -1270,18 +1711,21 @@ static struct clk tegra_pll_u = { .flags = PLLU, .ops = &tegra_pll_ops, .reg = 0xc0, - .input_min = 2000000, - .input_max = 40000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 480000000, - .vco_max = 960000000, - .pll_table = tegra_pll_u_table, .max_rate = 480000000, -}; - -static struct clk_pll_table tegra_pll_x_table[] = { + .u.pll = { + .input_min = 2000000, + .input_max = 40000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 480000000, + .vco_max = 960000000, + .freq_table = tegra_pll_u_freq_table, + .lock_delay = 1000, + }, +}; + +static struct clk_pll_freq_table tegra_pll_x_freq_table[] = { /* 1 GHz */ { 12000000, 1000000000, 1000, 12, 1, 12}, { 13000000, 1000000000, 1000, 13, 1, 12}, @@ -1307,10 +1751,10 @@ static struct clk_pll_table tegra_pll_x_table[] = { { 26000000, 760000000, 760, 26, 1, 12}, /* 608 MHz */ - { 12000000, 608000000, 760, 12, 1, 12}, - { 13000000, 608000000, 760, 13, 1, 12}, + { 12000000, 608000000, 608, 12, 1, 12}, + { 13000000, 608000000, 608, 13, 1, 12}, { 19200000, 608000000, 380, 12, 1, 8}, - { 26000000, 608000000, 760, 26, 1, 12}, + { 26000000, 608000000, 608, 26, 1, 12}, /* 456 MHz */ { 12000000, 456000000, 456, 12, 1, 12}, @@ -1332,18 +1776,21 @@ static struct clk tegra_pll_x = { .flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG, .ops = &tegra_pllx_ops, .reg = 0xe0, - .input_min = 2000000, - .input_max = 31000000, .parent = &tegra_clk_m, - .cf_min = 1000000, - .cf_max = 6000000, - .vco_min = 20000000, - .vco_max = 1200000000, - .pll_table = tegra_pll_x_table, .max_rate = 1000000000, -}; - -static struct clk_pll_table tegra_pll_e_table[] = { + .u.pll = { + .input_min = 2000000, + .input_max = 31000000, + .cf_min = 1000000, + .cf_max = 6000000, + .vco_min = 20000000, + .vco_max = 1200000000, + .freq_table = tegra_pll_x_freq_table, + .lock_delay = 300, + }, +}; + +static struct clk_pll_freq_table tegra_pll_e_freq_table[] = { { 12000000, 100000000, 200, 24, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -1352,23 +1799,49 @@ static struct clk tegra_pll_e = { .name = "pll_e", .flags = PLL_ALT_MISC_REG, .ops = &tegra_plle_ops, - .input_min = 12000000, - .input_max = 12000000, - .max_rate = 100000000, .parent = &tegra_clk_m, .reg = 0xe8, - .pll_table = tegra_pll_e_table, + .max_rate = 100000000, + .u.pll = { + .input_min = 12000000, + .input_max = 12000000, + .freq_table = tegra_pll_e_freq_table, + }, }; static struct clk tegra_clk_d = { .name = "clk_d", .flags = PERIPH_NO_RESET, .ops = &tegra_clk_double_ops, - .clk_num = 90, .reg = 0x34, .reg_shift = 12, .parent = &tegra_clk_m, .max_rate = 52000000, + .u.periph = { + .clk_num = 90, + }, +}; + +/* dap_mclk1, belongs to the cdev1 pingroup. */ +static struct clk tegra_clk_cdev1 = { + .name = "cdev1", + .ops = &tegra_cdev_clk_ops, + .rate = 26000000, + .max_rate = 26000000, + .u.periph = { + .clk_num = 94, + }, +}; + +/* dap_mclk2, belongs to the cdev2 pingroup. 
*/ +static struct clk tegra_clk_cdev2 = { + .name = "cdev2", + .ops = &tegra_cdev_clk_ops, + .rate = 26000000, + .max_rate = 26000000, + .u.periph = { + .clk_num = 93, + }, }; /* initialized before peripheral clocks */ @@ -1394,7 +1867,7 @@ static struct clk tegra_clk_audio = { .name = "audio", .inputs = mux_audio_sync_clk, .reg = 0x38, - .max_rate = 24000000, + .max_rate = 73728000, .ops = &tegra_audio_sync_clk_ops }; @@ -1403,10 +1876,12 @@ static struct clk tegra_clk_audio_2x = { .flags = PERIPH_NO_RESET, .max_rate = 48000000, .ops = &tegra_clk_double_ops, - .clk_num = 89, .reg = 0x34, .reg_shift = 8, .parent = &tegra_clk_audio, + .u.periph = { + .clk_num = 89, + }, }; struct clk_lookup tegra_audio_clk_lookups[] = { @@ -1478,17 +1953,26 @@ static struct clk tegra_clk_sclk = { .inputs = mux_sclk, .reg = 0x28, .ops = &tegra_super_ops, - .max_rate = 600000000, + .max_rate = 240000000, + .min_rate = 120000000, }; static struct clk tegra_clk_virtual_cpu = { .name = "cpu", .parent = &tegra_clk_cclk, - .main = &tegra_pll_x, - .backup = &tegra_clk_m, .ops = &tegra_cpu_ops, .max_rate = 1000000000, - .dvfs = &tegra_dvfs_virtual_cpu_dvfs, + .u.cpu = { + .main = &tegra_pll_x, + .backup = &tegra_pll_p, + }, +}; + +static struct clk tegra_clk_cop = { + .name = "cop", + .parent = &tegra_clk_sclk, + .ops = &tegra_cop_ops, + .max_rate = 240000000, }; static struct clk tegra_clk_hclk = { @@ -1508,7 +1992,15 @@ static struct clk tegra_clk_pclk = { .reg = 0x30, .reg_shift = 0, .ops = &tegra_bus_ops, - .max_rate = 108000000, + .max_rate = 120000000, +}; + +static struct clk tegra_clk_blink = { + .name = "blink", + .parent = &tegra_clk_32k, + .reg = 0x40, + .ops = &tegra_blink_clk_ops, + .max_rate = 32768, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = { @@ -1587,6 +2079,23 @@ static struct clk_mux_sel mux_clk_32k[] = { { 0, 0}, }; +static struct clk_mux_sel mux_pclk[] = { + { .input = &tegra_clk_pclk, .value = 0}, + { 0, 0}, +}; + +static struct clk tegra_clk_emc = { + .name = "emc", + .ops = &tegra_emc_clk_ops, + .reg = 0x19c, + .max_rate = 800000000, + .inputs = mux_pllm_pllc_pllp_clkm, + .flags = MUX | DIV_U71 | PERIPH_EMC_ENB, + .u.periph = { + .clk_num = 57, + }, +}; + #define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ { \ .name = _name, \ @@ -1595,19 +2104,32 @@ static struct clk_mux_sel mux_clk_32k[] = { .con_id = _con, \ }, \ .ops = &tegra_periph_clk_ops, \ - .clk_num = _clk_num, \ .reg = _reg, \ .inputs = _inputs, \ .flags = _flags, \ .max_rate = _max, \ + .u.periph = { \ + .clk_num = _clk_num, \ + }, \ + } + +#define SHARED_CLK(_name, _dev, _con, _parent) \ + { \ + .name = _name, \ + .lookup = { \ + .dev_id = _dev, \ + .con_id = _con, \ + }, \ + .ops = &tegra_clk_shared_bus_ops, \ + .parent = _parent, \ } -struct clk tegra_periph_clks[] = { +struct clk tegra_list_clks[] = { + PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 108000000, mux_pclk, 0), PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET), PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0), - PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), - PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), - /* FIXME: spdif has 2 clocks but 1 enable */ + PERIPH_CLK("i2s1", "tegra-i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), + PERIPH_CLK("i2s2", "tegra-i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), 
PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71), PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71), @@ -1620,13 +2142,15 @@ struct clk tegra_periph_clks[] = { PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */ PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ - /* FIXME: vfir shares an enable with uartb */ PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ - PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ - PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ + PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ + PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0), + PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0), + PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0), + PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */ /* FIXME: what is la? 
*/ PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), @@ -1641,37 +2165,46 @@ struct clk tegra_periph_clks[] = { PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), - PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 216000000, mux_pllp_pllc_pllm_clkm, MUX), - PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 216000000, mux_pllp_pllc_pllm_clkm, MUX), - PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 216000000, mux_pllp_pllc_pllm_clkm, MUX), - PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 216000000, mux_pllp_pllc_pllm_clkm, MUX), - PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 216000000, mux_pllp_pllc_pllm_clkm, MUX), + PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX), + PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX), + PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX), + PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX), + PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX), PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */ PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ - /* FIXME: vi and vi_sensor share an enable */ - PERIPH_CLK("vi", "vi", NULL, 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ - PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */ + PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ + PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */ PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ - /* FIXME: cve and tvo share an enable */ PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ - PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ + PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ - PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ - PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, 190000000, 
mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ + PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ + PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ - PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, 800000000, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB), PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */ - PERIPH_CLK("csi", "csi", NULL, 52, 0, 72000000, mux_pllp_out3, 0), - PERIPH_CLK("isp", "isp", NULL, 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ - PERIPH_CLK("csus", "csus", NULL, 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), + PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0), + PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ + PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), + + SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk), + SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc), + SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc), + SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc), + SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc), + SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc), + SHARED_CLK("host.emc", "tegra_grhost", "emc", &tegra_clk_emc), + SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc), + SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc), + SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc), + SHARED_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc), }; #define CLK_DUPLICATE(_name, _dev, _con) \ @@ -1693,9 +2226,22 @@ struct clk_duplicate tegra_clk_duplicates[] = { CLK_DUPLICATE("uartc", "tegra_uart.2", NULL), CLK_DUPLICATE("uartd", "tegra_uart.3", NULL), CLK_DUPLICATE("uarte", "tegra_uart.4", NULL), - CLK_DUPLICATE("host1x", "tegrafb.0", "host1x"), - CLK_DUPLICATE("host1x", "tegrafb.1", "host1x"), + CLK_DUPLICATE("usbd", "utmip-pad", NULL), CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL), + CLK_DUPLICATE("usbd", "tegra-otg", NULL), + CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"), + CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"), + CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL), + CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL), + CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL), + CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL), + CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"), + CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"), + CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"), + CLK_DUPLICATE("epp", "tegra_grhost", "epp"), + CLK_DUPLICATE("mpe", "tegra_grhost", "mpe"), + CLK_DUPLICATE("cop", "tegra-avp", "cop"), + CLK_DUPLICATE("vde", "tegra-aes", "vde"), }; #define CLK(dev, con, ck) \ @@ -1705,68 +2251,70 @@ struct clk_duplicate tegra_clk_duplicates[] = { .clk = ck, \ } 
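
The PERIPH_CLK()/SHARED_CLK() tables above build every clock as a static struct initializer, with per-type parameters moved into a union member (.u.periph.clk_num) so one array can hold heterogeneous clocks. The following is a minimal standalone sketch of that pattern, not the kernel code; all names (fake_clk, FAKE_PERIPH_CLK, the printf "registration") are invented for illustration.

#include <stddef.h>
#include <stdio.h>

struct fake_clk {
	const char *name;
	const char *dev_id;
	unsigned long max_rate;
	union {
		struct { int clk_num; } periph;
		struct { int divider; } bus;
	} u;
};

#define FAKE_PERIPH_CLK(_name, _dev, _clk_num, _max)	\
	{						\
		.name = _name,				\
		.dev_id = _dev,				\
		.max_rate = _max,			\
		.u.periph = { .clk_num = _clk_num },	\
	}

static struct fake_clk fake_list_clks[] = {
	FAKE_PERIPH_CLK("rtc",   "rtc-fake",   4, 32768),
	FAKE_PERIPH_CLK("timer", "timer-fake", 5, 26000000),
};

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* stand-in for walking the table once and registering each entry */
	for (size_t i = 0; i < ARRAY_LEN(fake_list_clks); i++)
		printf("register %s (%s), clk_num %d, max %lu Hz\n",
		       fake_list_clks[i].name, fake_list_clks[i].dev_id,
		       fake_list_clks[i].u.periph.clk_num,
		       fake_list_clks[i].max_rate);
	return 0;
}
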
-struct clk_lookup tegra_clk_lookups[] = { - /* external root sources */ - CLK(NULL, "32k_clk", &tegra_clk_32k), - CLK(NULL, "pll_s", &tegra_pll_s), - CLK(NULL, "clk_m", &tegra_clk_m), - CLK(NULL, "pll_m", &tegra_pll_m), - CLK(NULL, "pll_m_out1", &tegra_pll_m_out1), - CLK(NULL, "pll_c", &tegra_pll_c), - CLK(NULL, "pll_c_out1", &tegra_pll_c_out1), - CLK(NULL, "pll_p", &tegra_pll_p), - CLK(NULL, "pll_p_out1", &tegra_pll_p_out1), - CLK(NULL, "pll_p_out2", &tegra_pll_p_out2), - CLK(NULL, "pll_p_out3", &tegra_pll_p_out3), - CLK(NULL, "pll_p_out4", &tegra_pll_p_out4), - CLK(NULL, "pll_a", &tegra_pll_a), - CLK(NULL, "pll_a_out0", &tegra_pll_a_out0), - CLK(NULL, "pll_d", &tegra_pll_d), - CLK(NULL, "pll_d_out0", &tegra_pll_d_out0), - CLK(NULL, "pll_u", &tegra_pll_u), - CLK(NULL, "pll_x", &tegra_pll_x), - CLK(NULL, "pll_e", &tegra_pll_e), - CLK(NULL, "cclk", &tegra_clk_cclk), - CLK(NULL, "sclk", &tegra_clk_sclk), - CLK(NULL, "hclk", &tegra_clk_hclk), - CLK(NULL, "pclk", &tegra_clk_pclk), - CLK(NULL, "clk_d", &tegra_clk_d), - CLK(NULL, "cpu", &tegra_clk_virtual_cpu), -}; +struct clk *tegra_ptr_clks[] = { + &tegra_clk_32k, + &tegra_pll_s, + &tegra_clk_m, + &tegra_pll_m, + &tegra_pll_m_out1, + &tegra_pll_c, + &tegra_pll_c_out1, + &tegra_pll_p, + &tegra_pll_p_out1, + &tegra_pll_p_out2, + &tegra_pll_p_out3, + &tegra_pll_p_out4, + &tegra_pll_a, + &tegra_pll_a_out0, + &tegra_pll_d, + &tegra_pll_d_out0, + &tegra_pll_u, + &tegra_pll_x, + &tegra_pll_e, + &tegra_clk_cclk, + &tegra_clk_sclk, + &tegra_clk_hclk, + &tegra_clk_pclk, + &tegra_clk_d, + &tegra_clk_cdev1, + &tegra_clk_cdev2, + &tegra_clk_virtual_cpu, + &tegra_clk_blink, + &tegra_clk_cop, + &tegra_clk_emc, +}; + +static void tegra2_init_one_clock(struct clk *c) +{ + clk_init(c); + INIT_LIST_HEAD(&c->shared_bus_list); + if (!c->lookup.dev_id && !c->lookup.con_id) + c->lookup.con_id = c->name; + c->lookup.clk = c; + clkdev_add(&c->lookup); +} void __init tegra2_init_clocks(void) { int i; - struct clk_lookup *cl; struct clk *c; - struct clk_duplicate *cd; - - for (i = 0; i < ARRAY_SIZE(tegra_clk_lookups); i++) { - cl = &tegra_clk_lookups[i]; - clk_init(cl->clk); - clkdev_add(cl); - } - for (i = 0; i < ARRAY_SIZE(tegra_periph_clks); i++) { - c = &tegra_periph_clks[i]; - cl = &c->lookup; - cl->clk = c; + for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++) + tegra2_init_one_clock(tegra_ptr_clks[i]); - clk_init(cl->clk); - clkdev_add(cl); - } + for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++) + tegra2_init_one_clock(&tegra_list_clks[i]); for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) { - cd = &tegra_clk_duplicates[i]; - c = tegra_get_clock_by_name(cd->name); - if (c) { - cl = &cd->lookup; - cl->clk = c; - clkdev_add(cl); - } else { + c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name); + if (!c) { pr_err("%s: Unknown duplicate clock %s\n", __func__, - cd->name); + tegra_clk_duplicates[i].name); + continue; } + + tegra_clk_duplicates[i].lookup.clk = c; + clkdev_add(&tegra_clk_duplicates[i].lookup); } init_audio_sync_clock_mux(); @@ -1774,7 +2322,7 @@ void __init tegra2_init_clocks(void) #ifdef CONFIG_PM static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM + - PERIPH_CLK_SOURCE_NUM + 3]; + PERIPH_CLK_SOURCE_NUM + 22]; void tegra_clk_suspend(void) { @@ -1782,6 +2330,29 @@ void tegra_clk_suspend(void) u32 *ctx = clk_rst_suspend; *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK; + *ctx++ = clk_readl(tegra_pll_c.reg + PLL_BASE); + *ctx++ = clk_readl(tegra_pll_c.reg + PLL_MISC(&tegra_pll_c)); + *ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE); + *ctx++ 
= clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a)); + *ctx++ = clk_readl(tegra_pll_s.reg + PLL_BASE); + *ctx++ = clk_readl(tegra_pll_s.reg + PLL_MISC(&tegra_pll_s)); + *ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE); + *ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d)); + *ctx++ = clk_readl(tegra_pll_u.reg + PLL_BASE); + *ctx++ = clk_readl(tegra_pll_u.reg + PLL_MISC(&tegra_pll_u)); + + *ctx++ = clk_readl(tegra_pll_m_out1.reg); + *ctx++ = clk_readl(tegra_pll_a_out0.reg); + *ctx++ = clk_readl(tegra_pll_c_out1.reg); + + *ctx++ = clk_readl(tegra_clk_cclk.reg); + *ctx++ = clk_readl(tegra_clk_cclk.reg + SUPER_CLK_DIVIDER); + + *ctx++ = clk_readl(tegra_clk_sclk.reg); + *ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER); + *ctx++ = clk_readl(tegra_clk_pclk.reg); + + *ctx++ = clk_readl(tegra_clk_audio.reg); for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC; off += 4) { @@ -1800,6 +2371,8 @@ void tegra_clk_suspend(void) *ctx++ = clk_readl(MISC_CLK_ENB); *ctx++ = clk_readl(CLK_MASK_ARM); + + BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend)); } void tegra_clk_resume(void) @@ -1812,6 +2385,31 @@ void tegra_clk_resume(void) val |= *ctx++; clk_writel(val, OSC_CTRL); + clk_writel(*ctx++, tegra_pll_c.reg + PLL_BASE); + clk_writel(*ctx++, tegra_pll_c.reg + PLL_MISC(&tegra_pll_c)); + clk_writel(*ctx++, tegra_pll_a.reg + PLL_BASE); + clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a)); + clk_writel(*ctx++, tegra_pll_s.reg + PLL_BASE); + clk_writel(*ctx++, tegra_pll_s.reg + PLL_MISC(&tegra_pll_s)); + clk_writel(*ctx++, tegra_pll_d.reg + PLL_BASE); + clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d)); + clk_writel(*ctx++, tegra_pll_u.reg + PLL_BASE); + clk_writel(*ctx++, tegra_pll_u.reg + PLL_MISC(&tegra_pll_u)); + udelay(1000); + + clk_writel(*ctx++, tegra_pll_m_out1.reg); + clk_writel(*ctx++, tegra_pll_a_out0.reg); + clk_writel(*ctx++, tegra_pll_c_out1.reg); + + clk_writel(*ctx++, tegra_clk_cclk.reg); + clk_writel(*ctx++, tegra_clk_cclk.reg + SUPER_CLK_DIVIDER); + + clk_writel(*ctx++, tegra_clk_sclk.reg); + clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER); + clk_writel(*ctx++, tegra_clk_pclk.reg); + + clk_writel(*ctx++, tegra_clk_audio.reg); + /* enable all clocks before configuring clock sources */ clk_writel(0xbffffff9ul, CLK_OUT_ENB); clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4); diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c deleted file mode 100644 index 5529c238dd77..000000000000 --- a/arch/arm/mach-tegra/tegra2_dvfs.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * arch/arm/mach-tegra/tegra2_dvfs.c - * - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Colin Cross <ccross@google.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
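
The suspend path above streams register values into a fixed-size u32 array through a cursor and then checks that the cursor landed exactly at the end of the array, so a mismatch between the save code and the declared context size is caught immediately. Below is a rough user-space model of that pattern under stated assumptions: a plain array stands in for the clock registers, assert() stands in for BUG_ON(), and the names are invented.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SAVED_REGS 4

static uint32_t fake_regs[NUM_SAVED_REGS] = { 0x11, 0x22, 0x33, 0x44 };
static uint32_t ctx_buf[NUM_SAVED_REGS];

static void fake_suspend(void)
{
	uint32_t *ctx = ctx_buf;
	int i;

	for (i = 0; i < NUM_SAVED_REGS; i++)
		*ctx++ = fake_regs[i];		/* clk_readl() stand-in */

	/* mirrors BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend)) */
	assert(ctx - ctx_buf == NUM_SAVED_REGS);
}

static void fake_resume(void)
{
	const uint32_t *ctx = ctx_buf;
	int i;

	for (i = 0; i < NUM_SAVED_REGS; i++)
		fake_regs[i] = *ctx++;		/* clk_writel() stand-in */
}

int main(void)
{
	fake_suspend();
	fake_regs[2] = 0;	/* pretend the register block lost state */
	fake_resume();
	printf("restored reg2 = 0x%x\n", (unsigned int)fake_regs[2]);
	return 0;
}
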
- * - */ - -#include <linux/kernel.h> - -#include "clock.h" -#include "tegra2_dvfs.h" - -static struct dvfs_table virtual_cpu_process_0[] = { - {314000000, 750}, - {456000000, 825}, - {608000000, 900}, - {760000000, 975}, - {817000000, 1000}, - {912000000, 1050}, - {1000000000, 1100}, - {0, 0}, -}; - -static struct dvfs_table virtual_cpu_process_1[] = { - {314000000, 750}, - {456000000, 825}, - {618000000, 900}, - {770000000, 975}, - {827000000, 1000}, - {922000000, 1050}, - {1000000000, 1100}, - {0, 0}, -}; - -static struct dvfs_table virtual_cpu_process_2[] = { - {494000000, 750}, - {675000000, 825}, - {817000000, 875}, - {922000000, 925}, - {1000000000, 975}, - {0, 0}, -}; - -static struct dvfs_table virtual_cpu_process_3[] = { - {730000000, 750}, - {760000000, 775}, - {845000000, 800}, - {1000000000, 875}, - {0, 0}, -}; - -struct dvfs tegra_dvfs_virtual_cpu_dvfs = { - .reg_id = "vdd_cpu", - .process_id_table = { - { - .process_id = 0, - .table = virtual_cpu_process_0, - }, - { - .process_id = 1, - .table = virtual_cpu_process_1, - }, - { - .process_id = 2, - .table = virtual_cpu_process_2, - }, - { - .process_id = 3, - .table = virtual_cpu_process_3, - }, - }, - .process_id_table_length = 4, - .cpu = 1, -}; diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c new file mode 100644 index 000000000000..0f7ae6e90b55 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.c @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
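
The removed tegra2_dvfs.c tables above pair a CPU frequency with the voltage (in mV) it requires, one table per process_id, each terminated by a {0, 0} sentinel. The consumer of these tables is not part of this hunk, so the lookup below is only a plausible user-space model of how such a table is typically walked; the function name and the truncated table are illustrative, not the driver's code.

#include <stdio.h>

struct dvfs_entry {
	unsigned long freq;	/* Hz */
	int millivolts;
};

static const struct dvfs_entry cpu_process_0[] = {
	{  314000000,  750 },
	{  456000000,  825 },
	{  608000000,  900 },
	{ 1000000000, 1100 },
	{ 0, 0 },		/* sentinel */
};

/* return the lowest voltage that supports @rate, or -1 if out of range */
static int dvfs_voltage_for_rate(const struct dvfs_entry *t, unsigned long rate)
{
	for (; t->freq; t++)
		if (t->freq >= rate)
			return t->millivolts;
	return -1;
}

int main(void)
{
	printf("600 MHz needs %d mV\n",
	       dvfs_voltage_for_rate(cpu_process_0, 600000000UL));
	return 0;
}
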
+ * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> + +#include <mach/iomap.h> + +#include "tegra2_emc.h" + +#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE +static bool emc_enable = true; +#else +static bool emc_enable; +#endif +module_param(emc_enable, bool, 0644); + +static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE); +static const struct tegra_emc_table *tegra_emc_table; +static int tegra_emc_table_size; + +static inline void emc_writel(u32 val, unsigned long addr) +{ + writel(val, emc + addr); +} + +static inline u32 emc_readl(unsigned long addr) +{ + return readl(emc + addr); +} + +static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = { + 0x2c, /* RC */ + 0x30, /* RFC */ + 0x34, /* RAS */ + 0x38, /* RP */ + 0x3c, /* R2W */ + 0x40, /* W2R */ + 0x44, /* R2P */ + 0x48, /* W2P */ + 0x4c, /* RD_RCD */ + 0x50, /* WR_RCD */ + 0x54, /* RRD */ + 0x58, /* REXT */ + 0x5c, /* WDV */ + 0x60, /* QUSE */ + 0x64, /* QRST */ + 0x68, /* QSAFE */ + 0x6c, /* RDV */ + 0x70, /* REFRESH */ + 0x74, /* BURST_REFRESH_NUM */ + 0x78, /* PDEX2WR */ + 0x7c, /* PDEX2RD */ + 0x80, /* PCHG2PDEN */ + 0x84, /* ACT2PDEN */ + 0x88, /* AR2PDEN */ + 0x8c, /* RW2PDEN */ + 0x90, /* TXSR */ + 0x94, /* TCKE */ + 0x98, /* TFAW */ + 0x9c, /* TRPAB */ + 0xa0, /* TCLKSTABLE */ + 0xa4, /* TCLKSTOP */ + 0xa8, /* TREFBW */ + 0xac, /* QUSE_EXTRA */ + 0x114, /* FBIO_CFG6 */ + 0xb0, /* ODT_WRITE */ + 0xb4, /* ODT_READ */ + 0x104, /* FBIO_CFG5 */ + 0x2bc, /* CFG_DIG_DLL */ + 0x2c0, /* DLL_XFORM_DQS */ + 0x2c4, /* DLL_XFORM_QUSE */ + 0x2e0, /* ZCAL_REF_CNT */ + 0x2e4, /* ZCAL_WAIT_CNT */ + 0x2a8, /* AUTO_CAL_INTERVAL */ + 0x2d0, /* CFG_CLKTRIM_0 */ + 0x2d4, /* CFG_CLKTRIM_1 */ + 0x2d8, /* CFG_CLKTRIM_2 */ +}; + +/* Select the closest EMC rate that is higher than the requested rate */ +long tegra_emc_round_rate(unsigned long rate) +{ + int i; + int best = -1; + unsigned long distance = ULONG_MAX; + + if (!tegra_emc_table) + return -EINVAL; + + if (!emc_enable) + return -EINVAL; + + pr_debug("%s: %lu\n", __func__, rate); + + /* + * The EMC clock rate is twice the bus rate, and the bus rate is + * measured in kHz + */ + rate = rate / 2 / 1000; + + for (i = 0; i < tegra_emc_table_size; i++) { + if (tegra_emc_table[i].rate >= rate && + (tegra_emc_table[i].rate - rate) < distance) { + distance = tegra_emc_table[i].rate - rate; + best = i; + } + } + + if (best < 0) + return -EINVAL; + + pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate); + + return tegra_emc_table[best].rate * 2 * 1000; +} + +/* + * The EMC registers have shadow registers. When the EMC clock is updated + * in the clock controller, the shadow registers are copied to the active + * registers, allowing glitchless memory bus frequency changes. 
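
tegra_emc_round_rate() above halves the requested EMC rate, converts it to kHz (the table's unit), and picks the closest table entry at or above the request. The standalone sketch below restates that selection logic in user space; the kHz values in the table are invented, since the board-specific EMC tables are not part of this file.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

static const unsigned long emc_rates_khz[] = { 166000, 333000, 400000 };
#define N_RATES (sizeof(emc_rates_khz) / sizeof(emc_rates_khz[0]))

static long emc_round_rate(unsigned long rate_hz)
{
	unsigned long rate = rate_hz / 2 / 1000;	/* bus rate in kHz */
	unsigned long distance = ULONG_MAX;
	int best = -1;
	size_t i;

	for (i = 0; i < N_RATES; i++) {
		if (emc_rates_khz[i] >= rate &&
		    emc_rates_khz[i] - rate < distance) {
			distance = emc_rates_khz[i] - rate;
			best = (int)i;
		}
	}
	if (best < 0)
		return -1;		/* no table entry is high enough */

	return (long)(emc_rates_khz[best] * 2 * 1000);	/* back to EMC Hz */
}

int main(void)
{
	printf("600 MHz request -> %ld Hz\n", emc_round_rate(600000000UL));
	return 0;
}
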
+ * This function updates the shadow registers for a new clock frequency, + * and relies on the clock lock on the emc clock to avoid races between + * multiple frequency changes + */ +int tegra_emc_set_rate(unsigned long rate) +{ + int i; + int j; + + if (!tegra_emc_table) + return -EINVAL; + + /* + * The EMC clock rate is twice the bus rate, and the bus rate is + * measured in kHz + */ + rate = rate / 2 / 1000; + + for (i = 0; i < tegra_emc_table_size; i++) + if (tegra_emc_table[i].rate == rate) + break; + + if (i >= tegra_emc_table_size) + return -EINVAL; + + pr_debug("%s: setting to %lu\n", __func__, rate); + + for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) + emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]); + + emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]); + + return 0; +} + +void tegra_init_emc(const struct tegra_emc_table *table, int table_size) +{ + tegra_emc_table = table; + tegra_emc_table_size = table_size; +} diff --git a/arch/arm/mach-tegra/tegra2_emc.h b/arch/arm/mach-tegra/tegra2_emc.h new file mode 100644 index 000000000000..19f08cb31603 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define TEGRA_EMC_NUM_REGS 46 + +struct tegra_emc_table { + unsigned long rate; + u32 regs[TEGRA_EMC_NUM_REGS]; +}; + +int tegra_emc_set_rate(unsigned long rate); +long tegra_emc_round_rate(unsigned long rate); +void tegra_init_emc(const struct tegra_emc_table *table, int table_size); diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c index 7b8ad1f98f44..0fcb1eb4214d 100644 --- a/arch/arm/mach-tegra/timer.c +++ b/arch/arm/mach-tegra/timer.c @@ -18,6 +18,7 @@ */ #include <linux/init.h> +#include <linux/err.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/interrupt.h> @@ -33,10 +34,15 @@ #include <mach/iomap.h> #include <mach/irqs.h> +#include <mach/suspend.h> #include "board.h" #include "clock.h" +#define RTC_SECONDS 0x08 +#define RTC_SHADOW_SECONDS 0x0c +#define RTC_MILLISECONDS 0x10 + #define TIMERUS_CNTR_1US 0x10 #define TIMERUS_USEC_CFG 0x14 #define TIMERUS_CNTR_FREEZE 0x4c @@ -49,9 +55,11 @@ #define TIMER_PTV 0x0 #define TIMER_PCR 0x4 -struct tegra_timer; - static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE); +static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE); + +static struct timespec persistent_ts; +static u64 persistent_ms, last_persistent_ms; #define timer_writel(value, reg) \ __raw_writel(value, (u32)timer_reg_base + (reg)) @@ -132,6 +140,42 @@ static void notrace tegra_update_sched_clock(void) update_sched_clock(&cd, cyc, (u32)~0); } +/* + * tegra_rtc_read - Reads the Tegra RTC registers + * Care must be taken that this funciton is not called while the + * tegra_rtc driver could be executing to avoid race conditions + * on the RTC shadow register + */ +u64 tegra_rtc_read_ms(void) +{ + u32 ms = readl(rtc_base + RTC_MILLISECONDS); + u32 s = readl(rtc_base + RTC_SHADOW_SECONDS); + return (u64)s * MSEC_PER_SEC + ms; +} + +/* + * 
read_persistent_clock - Return time from a persistent clock. + * + * Reads the time from a source which isn't disabled during PM, the + * 32k sync timer. Convert the cycles elapsed since last read into + * nsecs and adds to a monotonically increasing timespec. + * Care must be taken that this funciton is not called while the + * tegra_rtc driver could be executing to avoid race conditions + * on the RTC shadow register + */ +void read_persistent_clock(struct timespec *ts) +{ + u64 delta; + struct timespec *tsp = &persistent_ts; + + last_persistent_ms = persistent_ms; + persistent_ms = tegra_rtc_read_ms(); + delta = persistent_ms - last_persistent_ms; + + timespec_add_ns(tsp, delta * NSEC_PER_MSEC); + *ts = *tsp; +} + static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = (struct clock_event_device *)dev_id; @@ -150,9 +194,22 @@ static struct irqaction tegra_timer_irq = { static void __init tegra_init_timer(void) { + struct clk *clk; unsigned long rate = clk_measure_input_freq(); int ret; + clk = clk_get_sys("timer", NULL); + BUG_ON(IS_ERR(clk)); + clk_enable(clk); + + /* + * rtc registers are used by read_persistent_clock, keep the rtc clock + * enabled + */ + clk = clk_get_sys("rtc-tegra", NULL); + BUG_ON(IS_ERR(clk)); + clk_enable(clk); + #ifdef CONFIG_HAVE_ARM_TWD twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600); #endif @@ -196,10 +253,22 @@ static void __init tegra_init_timer(void) tegra_clockevent.cpumask = cpu_all_mask; tegra_clockevent.irq = tegra_timer_irq.irq; clockevents_register_device(&tegra_clockevent); - - return; } struct sys_timer tegra_timer = { .init = tegra_init_timer, }; + +#ifdef CONFIG_PM +static u32 usec_config; + +void tegra_timer_suspend(void) +{ + usec_config = timer_readl(TIMERUS_USEC_CFG); +} + +void tegra_timer_resume(void) +{ + timer_writel(usec_config, TIMERUS_USEC_CFG); +} +#endif diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c new file mode 100644 index 000000000000..88081bb3ec52 --- /dev/null +++ b/arch/arm/mach-tegra/usb_phy.c @@ -0,0 +1,795 @@ +/* + * arch/arm/mach-tegra/usb_phy.c + * + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Erik Gilling <konkers@google.com> + * Benoit Goby <benoit@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
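
read_persistent_clock() above relies on a millisecond counter that keeps running across suspend: each call samples the counter, folds only the delta since the previous sample into a monotonically growing timespec, and hands that back. The following is a rough user-space model of that accumulation, assuming a fake counter variable in place of the RTC registers; none of these names are the kernel's.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL
#define NSEC_PER_SEC  1000000000ULL

struct simple_ts { uint64_t sec; uint64_t nsec; };

static uint64_t fake_rtc_ms;		/* stand-in for the 32k RTC counter */
static uint64_t last_ms;
static struct simple_ts persistent;

static void ts_add_ns(struct simple_ts *ts, uint64_t ns)
{
	ts->nsec += ns;
	ts->sec  += ts->nsec / NSEC_PER_SEC;
	ts->nsec %= NSEC_PER_SEC;
}

static struct simple_ts read_persistent(void)
{
	uint64_t now = fake_rtc_ms;
	uint64_t delta = now - last_ms;

	last_ms = now;
	ts_add_ns(&persistent, delta * NSEC_PER_MSEC);
	return persistent;
}

int main(void)
{
	struct simple_ts ts;

	fake_rtc_ms = 1500;		/* pretend 1.5 s elapsed in suspend */
	ts = read_persistent();
	printf("persistent clock: %llu.%09llu s\n",
	       (unsigned long long)ts.sec, (unsigned long long)ts.nsec);
	return 0;
}
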
+ * + */ + +#include <linux/resource.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/usb/otg.h> +#include <linux/usb/ulpi.h> +#include <asm/mach-types.h> +#include <mach/usb_phy.h> +#include <mach/iomap.h> + +#define ULPI_VIEWPORT 0x170 + +#define USB_PORTSC1 0x184 +#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30) +#define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26) +#define USB_PORTSC1_PHCD (1 << 23) +#define USB_PORTSC1_WKOC (1 << 22) +#define USB_PORTSC1_WKDS (1 << 21) +#define USB_PORTSC1_WKCN (1 << 20) +#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16) +#define USB_PORTSC1_PP (1 << 12) +#define USB_PORTSC1_SUSP (1 << 7) +#define USB_PORTSC1_PE (1 << 2) +#define USB_PORTSC1_CCS (1 << 0) + +#define USB_SUSP_CTRL 0x400 +#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3) +#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4) +#define USB_SUSP_CLR (1 << 5) +#define USB_PHY_CLK_VALID (1 << 7) +#define UTMIP_RESET (1 << 11) +#define UHSIC_RESET (1 << 11) +#define UTMIP_PHY_ENABLE (1 << 12) +#define ULPI_PHY_ENABLE (1 << 13) +#define USB_SUSP_SET (1 << 14) +#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16) + +#define USB1_LEGACY_CTRL 0x410 +#define USB1_NO_LEGACY_MODE (1 << 0) +#define USB1_VBUS_SENSE_CTL_MASK (3 << 1) +#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1) +#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \ + (1 << 1) +#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1) +#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1) + +#define ULPI_TIMING_CTRL_0 0x424 +#define ULPI_OUTPUT_PINMUX_BYP (1 << 10) +#define ULPI_CLKOUT_PINMUX_BYP (1 << 11) + +#define ULPI_TIMING_CTRL_1 0x428 +#define ULPI_DATA_TRIMMER_LOAD (1 << 0) +#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1) +#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16) +#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17) +#define ULPI_DIR_TRIMMER_LOAD (1 << 24) +#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25) + +#define UTMIP_PLL_CFG1 0x804 +#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) +#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) + +#define UTMIP_XCVR_CFG0 0x808 +#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0) +#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8) +#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10) +#define UTMIP_FORCE_PD_POWERDOWN (1 << 14) +#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16) +#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18) +#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25) + +#define UTMIP_BIAS_CFG0 0x80c +#define UTMIP_OTGPD (1 << 11) +#define UTMIP_BIASPD (1 << 10) + +#define UTMIP_HSRX_CFG0 0x810 +#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10) +#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15) + +#define UTMIP_HSRX_CFG1 0x814 +#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1) + +#define UTMIP_TX_CFG0 0x820 +#define UTMIP_FS_PREABMLE_J (1 << 19) +#define UTMIP_HS_DISCON_DISABLE (1 << 8) + +#define UTMIP_MISC_CFG0 0x824 +#define UTMIP_DPDM_OBSERVE (1 << 26) +#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27) +#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf) +#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe) +#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd) +#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc) +#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22) + +#define UTMIP_MISC_CFG1 0x828 +#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18) +#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6) + 
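
The UTMIP/ULPI macros defined above follow a common convention: FIELD(x) masks and shifts a value into position, so FIELD(~0) doubles as the field mask. Later in this file every register update is therefore "clear with FIELD(~0), then OR in FIELD(new)". The sketch below shows that read-modify-write idiom with an ordinary variable standing in for the hardware register; the DEMO_* names are made up.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ELASTIC_LIMIT(x)	(((x) & 0x1f) << 10)
#define DEMO_IDLE_WAIT(x)	(((x) & 0x1f) << 15)

static uint32_t demo_reg = 0xffffffff;	/* pretend hardware register */

static void set_limits(unsigned int elastic, unsigned int idle)
{
	uint32_t val = demo_reg;				/* readl() stand-in */

	val &= ~(DEMO_ELASTIC_LIMIT(~0) | DEMO_IDLE_WAIT(~0));	/* clear both fields */
	val |= DEMO_ELASTIC_LIMIT(elastic) | DEMO_IDLE_WAIT(idle);
	demo_reg = val;						/* writel() stand-in */
}

int main(void)
{
	set_limits(16, 17);
	printf("reg = 0x%08x\n", (unsigned int)demo_reg);
	return 0;
}
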
+#define UTMIP_DEBOUNCE_CFG0 0x82c +#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0) + +#define UTMIP_BAT_CHRG_CFG0 0x830 +#define UTMIP_PD_CHRG (1 << 0) + +#define UTMIP_SPARE_CFG0 0x834 +#define FUSE_SETUP_SEL (1 << 3) + +#define UTMIP_XCVR_CFG1 0x838 +#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0) +#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2) +#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4) +#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18) + +#define UTMIP_BIAS_CFG1 0x83c +#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3) + +static DEFINE_SPINLOCK(utmip_pad_lock); +static int utmip_pad_count; + +struct tegra_xtal_freq { + int freq; + u8 enable_delay; + u8 stable_count; + u8 active_delay; + u8 xtal_freq_count; + u16 debounce; +}; + +static const struct tegra_xtal_freq tegra_freq_table[] = { + { + .freq = 12000000, + .enable_delay = 0x02, + .stable_count = 0x2F, + .active_delay = 0x04, + .xtal_freq_count = 0x76, + .debounce = 0x7530, + }, + { + .freq = 13000000, + .enable_delay = 0x02, + .stable_count = 0x33, + .active_delay = 0x05, + .xtal_freq_count = 0x7F, + .debounce = 0x7EF4, + }, + { + .freq = 19200000, + .enable_delay = 0x03, + .stable_count = 0x4B, + .active_delay = 0x06, + .xtal_freq_count = 0xBB, + .debounce = 0xBB80, + }, + { + .freq = 26000000, + .enable_delay = 0x04, + .stable_count = 0x66, + .active_delay = 0x09, + .xtal_freq_count = 0xFE, + .debounce = 0xFDE8, + }, +}; + +static struct tegra_utmip_config utmip_default[] = { + [0] = { + .hssync_start_delay = 9, + .idle_wait_delay = 17, + .elastic_limit = 16, + .term_range_adj = 6, + .xcvr_setup = 9, + .xcvr_lsfslew = 1, + .xcvr_lsrslew = 1, + }, + [2] = { + .hssync_start_delay = 9, + .idle_wait_delay = 17, + .elastic_limit = 16, + .term_range_adj = 6, + .xcvr_setup = 9, + .xcvr_lsfslew = 2, + .xcvr_lsrslew = 2, + }, +}; + +static inline bool phy_is_ulpi(struct tegra_usb_phy *phy) +{ + return (phy->instance == 1); +} + +static int utmip_pad_open(struct tegra_usb_phy *phy) +{ + phy->pad_clk = clk_get_sys("utmip-pad", NULL); + if (IS_ERR(phy->pad_clk)) { + pr_err("%s: can't get utmip pad clock\n", __func__); + return PTR_ERR(phy->pad_clk); + } + + if (phy->instance == 0) { + phy->pad_regs = phy->regs; + } else { + phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE); + if (!phy->pad_regs) { + pr_err("%s: can't remap usb registers\n", __func__); + clk_put(phy->pad_clk); + return -ENOMEM; + } + } + return 0; +} + +static void utmip_pad_close(struct tegra_usb_phy *phy) +{ + if (phy->instance != 0) + iounmap(phy->pad_regs); + clk_put(phy->pad_clk); +} + +static void utmip_pad_power_on(struct tegra_usb_phy *phy) +{ + unsigned long val, flags; + void __iomem *base = phy->pad_regs; + + clk_enable(phy->pad_clk); + + spin_lock_irqsave(&utmip_pad_lock, flags); + + if (utmip_pad_count++ == 0) { + val = readl(base + UTMIP_BIAS_CFG0); + val &= ~(UTMIP_OTGPD | UTMIP_BIASPD); + writel(val, base + UTMIP_BIAS_CFG0); + } + + spin_unlock_irqrestore(&utmip_pad_lock, flags); + + clk_disable(phy->pad_clk); +} + +static int utmip_pad_power_off(struct tegra_usb_phy *phy) +{ + unsigned long val, flags; + void __iomem *base = phy->pad_regs; + + if (!utmip_pad_count) { + pr_err("%s: utmip pad already powered off\n", __func__); + return -EINVAL; + } + + clk_enable(phy->pad_clk); + + spin_lock_irqsave(&utmip_pad_lock, flags); + + if (--utmip_pad_count == 0) { + val = readl(base + UTMIP_BIAS_CFG0); + val |= UTMIP_OTGPD | UTMIP_BIASPD; + writel(val, base + UTMIP_BIAS_CFG0); + } + + spin_unlock_irqrestore(&utmip_pad_lock, flags); + + 
clk_disable(phy->pad_clk); + + return 0; +} + +static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) +{ + unsigned long timeout = 2000; + do { + if ((readl(reg) & mask) == result) + return 0; + udelay(1); + timeout--; + } while (timeout); + return -1; +} + +static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + if (phy->instance == 0) { + val = readl(base + USB_SUSP_CTRL); + val |= USB_SUSP_SET; + writel(val, base + USB_SUSP_CTRL); + + udelay(10); + + val = readl(base + USB_SUSP_CTRL); + val &= ~USB_SUSP_SET; + writel(val, base + USB_SUSP_CTRL); + } + + if (phy->instance == 2) { + val = readl(base + USB_PORTSC1); + val |= USB_PORTSC1_PHCD; + writel(val, base + USB_PORTSC1); + } + + if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0) + pr_err("%s: timeout waiting for phy to stabilize\n", __func__); +} + +static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + if (phy->instance == 0) { + val = readl(base + USB_SUSP_CTRL); + val |= USB_SUSP_CLR; + writel(val, base + USB_SUSP_CTRL); + + udelay(10); + + val = readl(base + USB_SUSP_CTRL); + val &= ~USB_SUSP_CLR; + writel(val, base + USB_SUSP_CTRL); + } + + if (phy->instance == 2) { + val = readl(base + USB_PORTSC1); + val &= ~USB_PORTSC1_PHCD; + writel(val, base + USB_PORTSC1); + } + + if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, + USB_PHY_CLK_VALID)) + pr_err("%s: timeout waiting for phy to stabilize\n", __func__); +} + +static int utmi_phy_power_on(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + struct tegra_utmip_config *config = phy->config; + + val = readl(base + USB_SUSP_CTRL); + val |= UTMIP_RESET; + writel(val, base + USB_SUSP_CTRL); + + if (phy->instance == 0) { + val = readl(base + USB1_LEGACY_CTRL); + val |= USB1_NO_LEGACY_MODE; + writel(val, base + USB1_LEGACY_CTRL); + } + + val = readl(base + UTMIP_TX_CFG0); + val &= ~UTMIP_FS_PREABMLE_J; + writel(val, base + UTMIP_TX_CFG0); + + val = readl(base + UTMIP_HSRX_CFG0); + val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0)); + val |= UTMIP_IDLE_WAIT(config->idle_wait_delay); + val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit); + writel(val, base + UTMIP_HSRX_CFG0); + + val = readl(base + UTMIP_HSRX_CFG1); + val &= ~UTMIP_HS_SYNC_START_DLY(~0); + val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay); + writel(val, base + UTMIP_HSRX_CFG1); + + val = readl(base + UTMIP_DEBOUNCE_CFG0); + val &= ~UTMIP_BIAS_DEBOUNCE_A(~0); + val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce); + writel(val, base + UTMIP_DEBOUNCE_CFG0); + + val = readl(base + UTMIP_MISC_CFG0); + val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE; + writel(val, base + UTMIP_MISC_CFG0); + + val = readl(base + UTMIP_MISC_CFG1); + val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0)); + val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) | + UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count); + writel(val, base + UTMIP_MISC_CFG1); + + val = readl(base + UTMIP_PLL_CFG1); + val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0)); + val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) | + UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay); + writel(val, base + UTMIP_PLL_CFG1); + + if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) { + val = readl(base + USB_SUSP_CTRL); + val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV); + writel(val, base + USB_SUSP_CTRL); + } + + 
utmip_pad_power_on(phy); + + val = readl(base + UTMIP_XCVR_CFG0); + val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | + UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) | + UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) | + UTMIP_XCVR_HSSLEW_MSB(~0)); + val |= UTMIP_XCVR_SETUP(config->xcvr_setup); + val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew); + val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew); + writel(val, base + UTMIP_XCVR_CFG0); + + val = readl(base + UTMIP_XCVR_CFG1); + val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | + UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0)); + val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj); + writel(val, base + UTMIP_XCVR_CFG1); + + val = readl(base + UTMIP_BAT_CHRG_CFG0); + val &= ~UTMIP_PD_CHRG; + writel(val, base + UTMIP_BAT_CHRG_CFG0); + + val = readl(base + UTMIP_BIAS_CFG1); + val &= ~UTMIP_BIAS_PDTRK_COUNT(~0); + val |= UTMIP_BIAS_PDTRK_COUNT(0x5); + writel(val, base + UTMIP_BIAS_CFG1); + + if (phy->instance == 0) { + val = readl(base + UTMIP_SPARE_CFG0); + if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) + val &= ~FUSE_SETUP_SEL; + else + val |= FUSE_SETUP_SEL; + writel(val, base + UTMIP_SPARE_CFG0); + } + + if (phy->instance == 2) { + val = readl(base + USB_SUSP_CTRL); + val |= UTMIP_PHY_ENABLE; + writel(val, base + USB_SUSP_CTRL); + } + + val = readl(base + USB_SUSP_CTRL); + val &= ~UTMIP_RESET; + writel(val, base + USB_SUSP_CTRL); + + if (phy->instance == 0) { + val = readl(base + USB1_LEGACY_CTRL); + val &= ~USB1_VBUS_SENSE_CTL_MASK; + val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD; + writel(val, base + USB1_LEGACY_CTRL); + + val = readl(base + USB_SUSP_CTRL); + val &= ~USB_SUSP_SET; + writel(val, base + USB_SUSP_CTRL); + } + + utmi_phy_clk_enable(phy); + + if (phy->instance == 2) { + val = readl(base + USB_PORTSC1); + val &= ~USB_PORTSC1_PTS(~0); + writel(val, base + USB_PORTSC1); + } + + return 0; +} + +static void utmi_phy_power_off(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + utmi_phy_clk_disable(phy); + + if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) { + val = readl(base + USB_SUSP_CTRL); + val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0); + val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5); + writel(val, base + USB_SUSP_CTRL); + } + + val = readl(base + USB_SUSP_CTRL); + val |= UTMIP_RESET; + writel(val, base + USB_SUSP_CTRL); + + val = readl(base + UTMIP_BAT_CHRG_CFG0); + val |= UTMIP_PD_CHRG; + writel(val, base + UTMIP_BAT_CHRG_CFG0); + + val = readl(base + UTMIP_XCVR_CFG0); + val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | + UTMIP_FORCE_PDZI_POWERDOWN; + writel(val, base + UTMIP_XCVR_CFG0); + + val = readl(base + UTMIP_XCVR_CFG1); + val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | + UTMIP_FORCE_PDDR_POWERDOWN; + writel(val, base + UTMIP_XCVR_CFG1); + + utmip_pad_power_off(phy); +} + +static void utmi_phy_preresume(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + val = readl(base + UTMIP_TX_CFG0); + val |= UTMIP_HS_DISCON_DISABLE; + writel(val, base + UTMIP_TX_CFG0); +} + +static void utmi_phy_postresume(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + val = readl(base + UTMIP_TX_CFG0); + val &= ~UTMIP_HS_DISCON_DISABLE; + writel(val, base + UTMIP_TX_CFG0); +} + +static void utmi_phy_restore_start(struct tegra_usb_phy *phy, + enum tegra_usb_phy_port_speed port_speed) +{ + unsigned long val; + void __iomem *base = phy->regs; + + val = 
readl(base + UTMIP_MISC_CFG0); + val &= ~UTMIP_DPDM_OBSERVE_SEL(~0); + if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) + val |= UTMIP_DPDM_OBSERVE_SEL_FS_K; + else + val |= UTMIP_DPDM_OBSERVE_SEL_FS_J; + writel(val, base + UTMIP_MISC_CFG0); + udelay(1); + + val = readl(base + UTMIP_MISC_CFG0); + val |= UTMIP_DPDM_OBSERVE; + writel(val, base + UTMIP_MISC_CFG0); + udelay(10); +} + +static void utmi_phy_restore_end(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + + val = readl(base + UTMIP_MISC_CFG0); + val &= ~UTMIP_DPDM_OBSERVE; + writel(val, base + UTMIP_MISC_CFG0); + udelay(10); +} + +static int ulpi_phy_power_on(struct tegra_usb_phy *phy) +{ + int ret; + unsigned long val; + void __iomem *base = phy->regs; + struct tegra_ulpi_config *config = phy->config; + + gpio_direction_output(config->reset_gpio, 0); + msleep(5); + gpio_direction_output(config->reset_gpio, 1); + + clk_enable(phy->clk); + msleep(1); + + val = readl(base + USB_SUSP_CTRL); + val |= UHSIC_RESET; + writel(val, base + USB_SUSP_CTRL); + + val = readl(base + ULPI_TIMING_CTRL_0); + val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP; + writel(val, base + ULPI_TIMING_CTRL_0); + + val = readl(base + USB_SUSP_CTRL); + val |= ULPI_PHY_ENABLE; + writel(val, base + USB_SUSP_CTRL); + + val = 0; + writel(val, base + ULPI_TIMING_CTRL_1); + + val |= ULPI_DATA_TRIMMER_SEL(4); + val |= ULPI_STPDIRNXT_TRIMMER_SEL(4); + val |= ULPI_DIR_TRIMMER_SEL(4); + writel(val, base + ULPI_TIMING_CTRL_1); + udelay(10); + + val |= ULPI_DATA_TRIMMER_LOAD; + val |= ULPI_STPDIRNXT_TRIMMER_LOAD; + val |= ULPI_DIR_TRIMMER_LOAD; + writel(val, base + ULPI_TIMING_CTRL_1); + + /* Fix VbusInvalid due to floating VBUS */ + ret = otg_io_write(phy->ulpi, 0x40, 0x08); + if (ret) { + pr_err("%s: ulpi write failed\n", __func__); + return ret; + } + + ret = otg_io_write(phy->ulpi, 0x80, 0x0B); + if (ret) { + pr_err("%s: ulpi write failed\n", __func__); + return ret; + } + + val = readl(base + USB_PORTSC1); + val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN; + writel(val, base + USB_PORTSC1); + + val = readl(base + USB_SUSP_CTRL); + val |= USB_SUSP_CLR; + writel(val, base + USB_SUSP_CTRL); + udelay(100); + + val = readl(base + USB_SUSP_CTRL); + val &= ~USB_SUSP_CLR; + writel(val, base + USB_SUSP_CTRL); + + return 0; +} + +static void ulpi_phy_power_off(struct tegra_usb_phy *phy) +{ + unsigned long val; + void __iomem *base = phy->regs; + struct tegra_ulpi_config *config = phy->config; + + /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB + * Controller to immediately bring the ULPI PHY out of low power + */ + val = readl(base + USB_PORTSC1); + val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN); + writel(val, base + USB_PORTSC1); + + gpio_direction_output(config->reset_gpio, 0); + clk_disable(phy->clk); +} + +struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, + void *config, enum tegra_usb_phy_mode phy_mode) +{ + struct tegra_usb_phy *phy; + struct tegra_ulpi_config *ulpi_config; + unsigned long parent_rate; + int i; + int err; + + phy = kmalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL); + if (!phy) + return ERR_PTR(-ENOMEM); + + phy->instance = instance; + phy->regs = regs; + phy->config = config; + phy->mode = phy_mode; + + if (!phy->config) { + if (phy_is_ulpi(phy)) { + pr_err("%s: ulpi phy configuration missing", __func__); + err = -EINVAL; + goto err0; + } else { + phy->config = &utmip_default[instance]; + } + } + + phy->pll_u = clk_get_sys(NULL, "pll_u"); + if 
(IS_ERR(phy->pll_u)) { + pr_err("Can't get pll_u clock\n"); + err = PTR_ERR(phy->pll_u); + goto err0; + } + clk_enable(phy->pll_u); + + parent_rate = clk_get_rate(clk_get_parent(phy->pll_u)); + for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) { + if (tegra_freq_table[i].freq == parent_rate) { + phy->freq = &tegra_freq_table[i]; + break; + } + } + if (!phy->freq) { + pr_err("invalid pll_u parent rate %ld\n", parent_rate); + err = -EINVAL; + goto err1; + } + + if (phy_is_ulpi(phy)) { + ulpi_config = config; + phy->clk = clk_get_sys(NULL, ulpi_config->clk); + if (IS_ERR(phy->clk)) { + pr_err("%s: can't get ulpi clock\n", __func__); + err = -ENXIO; + goto err1; + } + tegra_gpio_enable(ulpi_config->reset_gpio); + gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b"); + gpio_direction_output(ulpi_config->reset_gpio, 0); + phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0); + phy->ulpi->io_priv = regs + ULPI_VIEWPORT; + } else { + err = utmip_pad_open(phy); + if (err < 0) + goto err1; + } + + return phy; + +err1: + clk_disable(phy->pll_u); + clk_put(phy->pll_u); +err0: + kfree(phy); + return ERR_PTR(err); +} + +int tegra_usb_phy_power_on(struct tegra_usb_phy *phy) +{ + if (phy_is_ulpi(phy)) + return ulpi_phy_power_on(phy); + else + return utmi_phy_power_on(phy); +} + +void tegra_usb_phy_power_off(struct tegra_usb_phy *phy) +{ + if (phy_is_ulpi(phy)) + ulpi_phy_power_off(phy); + else + utmi_phy_power_off(phy); +} + +void tegra_usb_phy_preresume(struct tegra_usb_phy *phy) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_preresume(phy); +} + +void tegra_usb_phy_postresume(struct tegra_usb_phy *phy) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_postresume(phy); +} + +void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy, + enum tegra_usb_phy_port_speed port_speed) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_restore_start(phy, port_speed); +} + +void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_restore_end(phy); +} + +void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_clk_disable(phy); +} + +void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy) +{ + if (!phy_is_ulpi(phy)) + utmi_phy_clk_enable(phy); +} + +void tegra_usb_phy_close(struct tegra_usb_phy *phy) +{ + if (phy_is_ulpi(phy)) + clk_put(phy->clk); + else + utmip_pad_close(phy); + clk_disable(phy->pll_u); + clk_put(phy->pll_u); + kfree(phy); +} diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h index 450a332f1009..fe449f1a1c14 100644 --- a/arch/arm/plat-omap/include/plat/usb.h +++ b/arch/arm/plat-omap/include/plat/usb.h @@ -7,15 +7,12 @@ #include <plat/board.h> #define OMAP3_HS_USB_PORTS 3 -enum ehci_hcd_omap_mode { - EHCI_HCD_OMAP_MODE_UNKNOWN, - EHCI_HCD_OMAP_MODE_PHY, - EHCI_HCD_OMAP_MODE_TLL, - EHCI_HCD_OMAP_MODE_HSIC, -}; -enum ohci_omap3_port_mode { - OMAP_OHCI_PORT_MODE_UNUSED, +enum usbhs_omap_port_mode { + OMAP_USBHS_PORT_MODE_UNUSED, + OMAP_EHCI_PORT_MODE_PHY, + OMAP_EHCI_PORT_MODE_TLL, + OMAP_EHCI_PORT_MODE_HSIC, OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0, OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM, OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0, @@ -25,24 +22,45 @@ enum ohci_omap3_port_mode { OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0, OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM, OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0, - OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM, + OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM }; -struct ehci_hcd_omap_platform_data { - enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS]; - unsigned phy_reset:1; +struct usbhs_omap_board_data { + enum 
usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; /* have to be valid if phy_reset is true and portx is in phy mode */ int reset_gpio_port[OMAP3_HS_USB_PORTS]; + + /* Set this to true for ES2.x silicon */ + unsigned es2_compatibility:1; + + unsigned phy_reset:1; + + /* + * Regulators for USB PHYs. + * Each PHY can have a separate regulator. + */ + struct regulator *regulator[OMAP3_HS_USB_PORTS]; }; -struct ohci_hcd_omap_platform_data { - enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS]; +struct ehci_hcd_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; + int reset_gpio_port[OMAP3_HS_USB_PORTS]; + struct regulator *regulator[OMAP3_HS_USB_PORTS]; + unsigned phy_reset:1; +}; - /* Set this to true for ES2.x silicon */ +struct ohci_hcd_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; unsigned es2_compatibility:1; }; +struct usbhs_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; + + struct ehci_hcd_omap_platform_data *ehci_data; + struct ohci_hcd_omap_platform_data *ohci_data; +}; /*-------------------------------------------------------------------------*/ #define OMAP1_OTG_BASE 0xfffb0400 @@ -80,18 +98,18 @@ enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI}; extern void usb_musb_init(struct omap_musb_board_data *board_data); -extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata); +extern void usbhs_init(const struct usbhs_omap_board_data *pdata); -extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata); +extern int omap_usbhs_enable(struct device *dev); +extern void omap_usbhs_disable(struct device *dev); extern int omap4430_phy_power(struct device *dev, int ID, int on); extern int omap4430_phy_set_clk(struct device *dev, int on); extern int omap4430_phy_init(struct device *dev); extern int omap4430_phy_exit(struct device *dev); - +extern int omap4430_phy_suspend(struct device *dev, int suspend); #endif - /* * FIXME correct answer depends on hmc_mode, * as does (on omap1) any nonzero value for config->otg port number diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 49d3208793e5..69ddc9f76c13 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -32,7 +32,6 @@ #include <plat/mailbox.h> -static struct workqueue_struct *mboxd; static struct omap_mbox **mboxes; static int mbox_configured; @@ -197,7 +196,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox) /* no more messages in the fifo. clear IRQ source. 
*/ ack_mbox_irq(mbox, IRQ_RX); nomem: - queue_work(mboxd, &mbox->rxq->work); + schedule_work(&mbox->rxq->work); } static irqreturn_t mbox_interrupt(int irq, void *p) @@ -307,7 +306,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox) if (!--mbox->use_count) { free_irq(mbox->irq, mbox); tasklet_kill(&mbox->txq->tasklet); - flush_work(&mbox->rxq->work); + flush_work_sync(&mbox->rxq->work); mbox_queue_free(mbox->txq); mbox_queue_free(mbox->rxq); } @@ -409,10 +408,6 @@ static int __init omap_mbox_init(void) if (err) return err; - mboxd = create_workqueue("mboxd"); - if (!mboxd) - return -ENOMEM; - /* kfifo size sanity check: alignment and minimal size */ mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, @@ -424,7 +419,6 @@ subsys_initcall(omap_mbox_init); static void __exit omap_mbox_exit(void) { - destroy_workqueue(mboxd); class_unregister(&omap_mbox_class); } module_exit(omap_mbox_exit); diff --git a/arch/arm/plat-s3c24xx/include/plat/udc.h b/arch/arm/plat-s3c24xx/include/plat/udc.h index 546bb4008f49..80457c6414aa 100644 --- a/arch/arm/plat-s3c24xx/include/plat/udc.h +++ b/arch/arm/plat-s3c24xx/include/plat/udc.h @@ -27,6 +27,10 @@ enum s3c2410_udc_cmd_e { struct s3c2410_udc_mach_info { void (*udc_command)(enum s3c2410_udc_cmd_e); void (*vbus_draw)(unsigned int ma); + + unsigned int pullup_pin; + unsigned int pullup_pin_inverted; + unsigned int vbus_pin; unsigned char vbus_pin_inverted; }; diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index c9113619029f..8d73724c0092 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c @@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ #ifdef CONFIG_CORE_TIMER_IRQ_L1 __attribute__((l1_text)) #endif irqreturn_t timer_interrupt(int irq, void *dummy) { - write_seqlock(&xtime_lock); - do_timer(1); - write_sequnlock(&xtime_lock); + xtime_update(1); #ifdef CONFIG_IPIPE update_root_process_times(get_irq_regs()); diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 4122678529c0..c40d07f708e8 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -136,7 +136,7 @@ SECTIONS . 
= ALIGN(16); INIT_DATA_SECTION(16) - PERCPU(4) + PERCPU(32, 4) .exit.data : { diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c index 00eb36f8debf..20c85b5dc7d0 100644 --- a/arch/cris/arch-v10/kernel/time.c +++ b/arch/cris/arch-v10/kernel/time.c @@ -140,7 +140,7 @@ stop_watchdog(void) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ //static unsigned short myjiff; /* used by our debug routine print_timestamp */ @@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id) /* call the real timer interrupt handler */ - do_timer(1); + xtime_update(1); cris_do_profile(regs); /* Save profiling information */ return IRQ_HANDLED; diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 84fed3b4b079..4c9e3e1ba5d1 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -26,7 +26,9 @@ #define FLUSH_ALL (void*)0xffffffff /* Vector of locks used for various atomic operations */ -spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; +spinlock_t cris_atomic_locks[] = { + [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks) +}; /* CPU masks */ cpumask_t phys_cpu_present_map = CPU_MASK_NONE; diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index a545211e999d..bb978ede8985 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c @@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick. + * as well as call the "xtime_update()" routine every clocktick. */ extern void cris_do_profile(struct pt_regs *regs); @@ -216,9 +216,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; /* Call the real timer interrupt handler */ - write_seqlock(&xtime_lock); - do_timer(1); - write_sequnlock(&xtime_lock); + xtime_update(1); return IRQ_HANDLED; } diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index c49be845f96a..728bbd9e7d4c 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -102,7 +102,7 @@ SECTIONS #endif __vmlinux_end = .; /* Last address of the physical file. 
*/ #ifdef CONFIG_ETRAX_ARCH_V32 - PERCPU(PAGE_SIZE) + PERCPU(32, PAGE_SIZE) .init.ramfs : { INIT_RAM_FS diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h index 08b3d1da3583..4bea27f50a7a 100644 --- a/arch/frv/include/asm/futex.h +++ b/arch/frv/include/asm/futex.h @@ -7,10 +7,11 @@ #include <asm/errno.h> #include <asm/uaccess.h> -extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); +extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { return -ENOSYS; } diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c index 14f64b054c7e..d155ca9e5098 100644 --- a/arch/frv/kernel/futex.c +++ b/arch/frv/kernel/futex.c @@ -18,7 +18,7 @@ * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o /* * do the futex operations */ -int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index 0ddbbae83cb2..b457de496b70 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c @@ -50,21 +50,13 @@ static struct irqaction timer_irq = { /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dummy) { profile_tick(CPU_PROFILING); - /* - * Here we are in the timer irq 
handler. We just have irqs locally - * disabled but we don't know if the timer_bh is running on the other - * CPU. We need to avoid to SMP race with it. NOTE: we don't need - * the irq version of write_lock because as just said we have irq - * locally disabled. -arca - */ - write_seqlock(&xtime_lock); - do_timer(1); + xtime_update(1); #ifdef CONFIG_HEARTBEAT static unsigned short n; @@ -72,8 +64,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) __set_LEDS(n); #endif /* CONFIG_HEARTBEAT */ - write_sequnlock(&xtime_lock); - update_process_times(user_mode(get_irq_regs())); return IRQ_HANDLED; diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index 8b973f3cc90e..0daae8af5787 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -37,7 +37,7 @@ SECTIONS _einittext = .; INIT_DATA_SECTION(8) - PERCPU(4096) + PERCPU(L1_CACHE_BYTES, 4096) . = ALIGN(PAGE_SIZE); __init_end = .; diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c index 165005aff9df..32263a138aa6 100644 --- a/arch/h8300/kernel/time.c +++ b/arch/h8300/kernel/time.c @@ -35,9 +35,7 @@ void h8300_timer_tick(void) { if (current->pid) profile_tick(CPU_PROFILING); - write_seqlock(&xtime_lock); - do_timer(1); - write_sequnlock(&xtime_lock); + xtime_update(1); update_process_times(user_mode(get_irq_regs())); } diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c index 3946c0fa8374..7a1533fad47d 100644 --- a/arch/h8300/kernel/timer/timer8.c +++ b/arch/h8300/kernel/timer/timer8.c @@ -61,7 +61,7 @@ /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dev_id) diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 3ded8fe62759..1d7bca0a396d 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -233,3 +233,4 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y +CONFIG_MISC_DEVICES=y diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 3a98b2dd58ac..b11fa880e4b6 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -208,3 +208,4 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_CRYPTO_MD5=y +CONFIG_MISC_DEVICES=y diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 13633da0d3de..bff0824cf8a4 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -390,8 +390,7 @@ static void rs_unthrottle(struct tty_struct * tty) } -static int rs_ioctl(struct tty_struct *tty, struct file * file, - unsigned int cmd, unsigned long arg) +static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index a2e7368a0150..4336d080b241 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -12,6 +12,8 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK +#define DMA_ERROR_CODE 0 + extern struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h 
index c7f0f062239c..8428525ddb22 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -46,7 +46,7 @@ do { \ } while (0) static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; + unsigned long prev; __asm__ __volatile__( " mf;; \n" " mov ar.ccv=%3;; \n" "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. \n" "[2:]" - : "=r" (r8) + : "=r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); + *uval = prev; return r8; } } diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h index 215d5454c7d3..3027e7516d85 100644 --- a/arch/ia64/include/asm/rwsem.h +++ b/arch/ia64/include/asm/rwsem.h @@ -25,20 +25,8 @@ #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." #endif -#include <linux/list.h> -#include <linux/spinlock.h> - #include <asm/intrinsics.h> -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; - spinlock_t wait_lock; - struct list_head wait_list; -}; - #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) #define RWSEM_ACTIVE_BIAS (1L) #define RWSEM_ACTIVE_MASK (0xffffffffL) @@ -46,26 +34,6 @@ struct rw_semaphore { #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -static inline void -init_rwsem (struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} - /* * lock for reading */ @@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem) #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count)) #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count)) -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* _ASM_IA64_RWSEM_H */ diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h index 96fc62366aa4..ed28bcd5bb85 100644 --- a/arch/ia64/include/asm/xen/hypercall.h +++ 
b/arch/ia64/include/asm/xen/hypercall.h @@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2, static inline int xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) { - return _hypercall2(int, sched_op_new, cmd, arg); + return _hypercall2(int, sched_op, cmd, arg); } static inline long diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 1753f6a30d55..80d50b83d419 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -582,6 +582,8 @@ out: /* Get the CPE error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); + local_irq_disable(); + return IRQ_HANDLED; } @@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = __get_free_pages(GFP_KERNEL, get_order(sz)); + data = (void *)__get_free_pages(GFP_KERNEL, + get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 9702fa92489e..156ad803d5b7 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id) new_itm += local_cpu_data->itm_delta; - if (smp_processor_id() == time_keeper_id) { - /* - * Here we are in the timer irq handler. We have irqs locally - * disabled, but we don't know if the timer_bh is running on - * another CPU. We need to avoid to SMP race by acquiring the - * xtime_lock. - */ - write_seqlock(&xtime_lock); - do_timer(1); - local_cpu_data->itm_next = new_itm; - write_sequnlock(&xtime_lock); - } else - local_cpu_data->itm_next = new_itm; + if (smp_processor_id() == time_keeper_id) + xtime_update(1); + + local_cpu_data->itm_next = new_itm; if (time_after(new_itm, ia64_get_itc())) break; @@ -222,7 +213,7 @@ skip_process_time_accounting: * comfort, we increase the safety margin by * intentionally dropping the next tick(s). We do NOT * update itm.next because that would force us to call - * do_timer() which in turn would let our clock run + * xtime_update() which in turn would let our clock run * too fast (with the potentially devastating effect * of losing monotony of time). */ diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 5a4d044dcb1c..787de4a77d82 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -198,7 +198,7 @@ SECTIONS { /* Per-cpu data: */ . = ALIGN(PERCPU_PAGE_SIZE); - PERCPU_VADDR(PERCPU_ADDR, :percpu) + PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) __phys_per_cpu_start = __per_cpu_load; /* * ensure percpu data fits diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index dbc4cbecb5ed..77db0b514fa4 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void) /* * Don't check status. The SAL call is not supported on all PROMs * but a failure is harmless. - * Architechtuallly, cpu_init is always called twice on cpu 0. We + * Architecturally, cpu_init is always called twice on cpu 0. We * should set cpu_number on cpu 0 once. */ if (cpuid == 0) { diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 4d4536e3b6f3..9c271be9919a 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) * use the GART mapped mode. 
*/ static u64 -tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) +tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { u64 mapaddr; diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c index fd66b048c6fa..419c8620945a 100644 --- a/arch/ia64/xen/suspend.c +++ b/arch/ia64/xen/suspend.c @@ -37,19 +37,14 @@ xen_mm_unpin_all(void) /* nothing */ } -void xen_pre_device_suspend(void) -{ - /* nothing */ -} - void -xen_pre_suspend() +xen_arch_pre_suspend() { /* nothing */ } void -xen_post_suspend(int suspend_cancelled) +xen_arch_post_suspend(int suspend_cancelled) { if (suspend_cancelled) return; diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index c1c544513e8d..1f8244a78bee 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c @@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm) run_posix_cpu_timers(p); delta_itm += local_cpu_data->itm_delta * (stolen + blocked); - if (cpu == time_keeper_id) { - write_seqlock(&xtime_lock); - do_timer(stolen + blocked); - local_cpu_data->itm_next = delta_itm + new_itm; - write_sequnlock(&xtime_lock); - } else { - local_cpu_data->itm_next = delta_itm + new_itm; - } + if (cpu == time_keeper_id) + xtime_update(stolen + blocked); + + local_cpu_data->itm_next = delta_itm + new_itm; + per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; } diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index bda86820bffd..84dd04048db9 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -107,15 +107,14 @@ u32 arch_gettimeoffset(void) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dev_id) { #ifndef CONFIG_SMP profile_tick(CPU_PROFILING); #endif - /* XXX FIXME. Uh, the xtime_lock should be held here, no? */ - do_timer(1); + xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 7da94eaa082b..c194d64cdbb9 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -53,7 +53,7 @@ SECTIONS __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) - PERCPU(PAGE_SIZE) + PERCPU(32, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index bc9271b85759..a85e251c411f 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -554,14 +554,6 @@ config MVME147_SCC This is the driver for the serial ports on the Motorola MVME147 boards. Everyone using one of these boards should say Y here. -config SERIAL167 - bool "CD2401 support for MVME166/7 serial ports" - depends on MVME16x - help - This is the driver for the serial ports on the Motorola MVME166, - 167, and 172 boards. Everyone using one of these boards should say - Y here. 
- config MVME162_SCC bool "SCC support for MVME162 serial ports" depends on MVME16x && BROKEN diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 9fe6fefb5e14..1edd95095cb4 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long); extern void bvme6000_reset (void); void bvme6000_set_vectors (void); -/* Save tick handler routine pointer, will point to do_timer() in - * kernel/sched.c, called via bvme6000_process_int() */ +/* Save tick handler routine pointer, will point to xtime_update() in + * kernel/timer/timekeeping.c, called via bvme6000_process_int() */ static irq_handler_t tick_handler; diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h index 213028cbe110..c94557b91448 100644 --- a/arch/m68k/include/asm/coldfire.h +++ b/arch/m68k/include/asm/coldfire.h @@ -14,39 +14,35 @@ /* - * Define master clock frequency. This is essentially done at config - * time now. No point enumerating dozens of possible clock options - * here. Also the peripheral clock (bus clock) divide ratio is set - * at config time too. + * Define master clock frequency. This is done at config time now. + * No point enumerating dozens of possible clock options here. And + * in any case new boards come along from time to time that have yet + * another different clocking frequency. */ #ifdef CONFIG_CLOCK_SET #define MCF_CLK CONFIG_CLOCK_FREQ -#define MCF_BUSCLK (CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV) #else #error "Don't know what your ColdFire CPU clock frequency is??" #endif /* - * Define the processor support peripherals base address. - * This is generally setup by the boards start up code. + * Define the processor internal peripherals base address. + * + * The majority of ColdFire parts use an MBAR register to set + * the base address. Some have an IPSBAR register instead, and it + * has slightly different rules on its size and alignment. Some + * parts have fixed addresses and the internal peripherals cannot + * be relocated in the CPU address space. + * + * The value of MBAR or IPSBAR is config time selectable, we no + * longer hard define it here. No MBAR or IPSBAR will be defined if + * this part has a fixed peripheral address map. 
*/ -#define MCF_MBAR 0x10000000 -#define MCF_MBAR2 0x80000000 -#if defined(CONFIG_M54xx) -#define MCF_IPSBAR MCF_MBAR -#elif defined(CONFIG_M520x) -#define MCF_IPSBAR 0xFC000000 -#else -#define MCF_IPSBAR 0x40000000 +#ifdef CONFIG_MBAR +#define MCF_MBAR CONFIG_MBAR #endif - -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) -#undef MCF_MBAR -#define MCF_MBAR MCF_IPSBAR -#elif defined(CONFIG_M532x) -#undef MCF_MBAR -#define MCF_MBAR 0x00000000 +#ifdef CONFIG_IPSBAR +#define MCF_IPSBAR CONFIG_IPSBAR #endif /****************************************************************************/ diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h index 561b03b5ddf8..9015eadd5c00 100644 --- a/arch/m68k/include/asm/m5206sim.h +++ b/arch/m68k/include/asm/m5206sim.h @@ -14,6 +14,7 @@ #define CPU_NAME "COLDFIRE(m5206)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK MCF_CLK #include <asm/m52xxacr.h> @@ -48,14 +49,14 @@ #define MCFSIM_SWIVR 0x42 /* SW Watchdog intr reg (r/w) */ #define MCFSIM_SWSR 0x43 /* SW Watchdog service (r/w) */ -#define MCFSIM_DCRR 0x46 /* DRAM Refresh reg (r/w) */ -#define MCFSIM_DCTR 0x4a /* DRAM Timing reg (r/w) */ -#define MCFSIM_DAR0 0x4c /* DRAM 0 Address reg(r/w) */ -#define MCFSIM_DMR0 0x50 /* DRAM 0 Mask reg (r/w) */ -#define MCFSIM_DCR0 0x57 /* DRAM 0 Control reg (r/w) */ -#define MCFSIM_DAR1 0x58 /* DRAM 1 Address reg (r/w) */ -#define MCFSIM_DMR1 0x5c /* DRAM 1 Mask reg (r/w) */ -#define MCFSIM_DCR1 0x63 /* DRAM 1 Control reg (r/w) */ +#define MCFSIM_DCRR (MCF_MBAR + 0x46) /* DRAM Refresh reg (r/w) */ +#define MCFSIM_DCTR (MCF_MBAR + 0x4a) /* DRAM Timing reg (r/w) */ +#define MCFSIM_DAR0 (MCF_MBAR + 0x4c) /* DRAM 0 Address reg(r/w) */ +#define MCFSIM_DMR0 (MCF_MBAR + 0x50) /* DRAM 0 Mask reg (r/w) */ +#define MCFSIM_DCR0 (MCF_MBAR + 0x57) /* DRAM 0 Control reg (r/w) */ +#define MCFSIM_DAR1 (MCF_MBAR + 0x58) /* DRAM 1 Address reg (r/w) */ +#define MCFSIM_DMR1 (MCF_MBAR + 0x5c) /* DRAM 1 Mask reg (r/w) */ +#define MCFSIM_DCR1 (MCF_MBAR + 0x63) /* DRAM 1 Control reg (r/w) */ #define MCFSIM_CSAR0 0x64 /* CS 0 Address 0 reg (r/w) */ #define MCFSIM_CSMR0 0x68 /* CS 0 Mask 0 reg (r/w) */ @@ -89,9 +90,15 @@ #define MCFSIM_PAR 0xcb /* Pin Assignment reg (r/w) */ #endif +#define MCFTIMER_BASE1 (MCF_MBAR + 0x100) /* Base of TIMER1 */ +#define MCFTIMER_BASE2 (MCF_MBAR + 0x120) /* Base of TIMER2 */ + #define MCFSIM_PADDR (MCF_MBAR + 0x1c5) /* Parallel Direction (r/w) */ #define MCFSIM_PADAT (MCF_MBAR + 0x1c9) /* Parallel Port Value (r/w) */ +#define MCFDMA_BASE0 (MCF_MBAR + 0x200) /* Base address DMA 0 */ +#define MCFDMA_BASE1 (MCF_MBAR + 0x240) /* Base address DMA 1 */ + #if defined(CONFIG_NETtel) #define MCFUART_BASE1 0x180 /* Base address of UART1 */ #define MCFUART_BASE2 0x140 /* Base address of UART2 */ diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h index 88ed8239fe4e..55d5a4c5fe0b 100644 --- a/arch/m68k/include/asm/m520xsim.h +++ b/arch/m68k/include/asm/m520xsim.h @@ -13,13 +13,14 @@ #define CPU_NAME "COLDFIRE(m520x)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m52xxacr.h> /* * Define the 520x SIM register set addresses. 
*/ -#define MCFICM_INTC0 0x48000 /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC0 0xFC048000 /* Base for Interrupt Ctrl 0 */ #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ @@ -35,9 +36,9 @@ * address to the SIMR and CIMR registers (not offsets into IPSBAR). * The 520x family only has a single INTC unit. */ -#define MCFINTC0_SIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_SIMR) -#define MCFINTC0_CIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_CIMR) -#define MCFINTC0_ICR0 (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0) +#define MCFINTC0_SIMR (MCFICM_INTC0 + MCFINTC_SIMR) +#define MCFINTC0_CIMR (MCFICM_INTC0 + MCFINTC_CIMR) +#define MCFINTC0_ICR0 (MCFICM_INTC0 + MCFINTC_ICR0) #define MCFINTC1_SIMR (0) #define MCFINTC1_CIMR (0) #define MCFINTC1_ICR0 (0) @@ -52,19 +53,22 @@ /* * SDRAM configuration registers. */ -#define MCFSIM_SDMR 0x000a8000 /* SDRAM Mode/Extended Mode Register */ -#define MCFSIM_SDCR 0x000a8004 /* SDRAM Control Register */ -#define MCFSIM_SDCFG1 0x000a8008 /* SDRAM Configuration Register 1 */ -#define MCFSIM_SDCFG2 0x000a800c /* SDRAM Configuration Register 2 */ -#define MCFSIM_SDCS0 0x000a8110 /* SDRAM Chip Select 0 Configuration */ -#define MCFSIM_SDCS1 0x000a8114 /* SDRAM Chip Select 1 Configuration */ +#define MCFSIM_SDMR 0xFC0a8000 /* SDRAM Mode/Extended Mode Register */ +#define MCFSIM_SDCR 0xFC0a8004 /* SDRAM Control Register */ +#define MCFSIM_SDCFG1 0xFC0a8008 /* SDRAM Configuration Register 1 */ +#define MCFSIM_SDCFG2 0xFC0a800c /* SDRAM Configuration Register 2 */ +#define MCFSIM_SDCS0 0xFC0a8110 /* SDRAM Chip Select 0 Configuration */ +#define MCFSIM_SDCS1 0xFC0a8114 /* SDRAM Chip Select 1 Configuration */ /* * EPORT and GPIO registers. */ +#define MCFEPORT_EPPAR 0xFC088000 #define MCFEPORT_EPDDR 0xFC088002 +#define MCFEPORT_EPIER 0xFC088003 #define MCFEPORT_EPDR 0xFC088004 #define MCFEPORT_EPPDR 0xFC088005 +#define MCFEPORT_EPFR 0xFC088006 #define MCFGPIO_PODR_BUSCTL 0xFC0A4000 #define MCFGPIO_PODR_BE 0xFC0A4001 @@ -119,10 +123,10 @@ #define MCFGPIO_IRQ_MAX 8 #define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE -#define MCF_GPIO_PAR_UART (0xA4036) -#define MCF_GPIO_PAR_FECI2C (0xA4033) -#define MCF_GPIO_PAR_QSPI (0xA4034) -#define MCF_GPIO_PAR_FEC (0xA4038) +#define MCF_GPIO_PAR_UART 0xFC0A4036 +#define MCF_GPIO_PAR_FECI2C 0xFC0A4033 +#define MCF_GPIO_PAR_QSPI 0xFC0A4034 +#define MCF_GPIO_PAR_FEC 0xFC0A4038 #define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0001) #define MCF_GPIO_PAR_UART_PAR_UTXD0 (0x0002) @@ -134,11 +138,23 @@ #define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 (0x04) /* + * PIT timer module. + */ +#define MCFPIT_BASE1 0xFC080000 /* Base address of TIMER1 */ +#define MCFPIT_BASE2 0xFC084000 /* Base address of TIMER2 */ + +/* * UART module. */ -#define MCFUART_BASE1 0x60000 /* Base address of UART1 */ -#define MCFUART_BASE2 0x64000 /* Base address of UART2 */ -#define MCFUART_BASE3 0x68000 /* Base address of UART2 */ +#define MCFUART_BASE1 0xFC060000 /* Base address of UART1 */ +#define MCFUART_BASE2 0xFC064000 /* Base address of UART2 */ +#define MCFUART_BASE3 0xFC068000 /* Base address of UART2 */ + +/* + * FEC module. + */ +#define MCFFEC_BASE 0xFC030000 /* Base of FEC ethernet */ +#define MCFFEC_SIZE 0x800 /* Register set size */ /* * Reset Controll Unit. 
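[Editorial note, not part of the patch: the m520x hunk above folds the full 0xFCxxxxxx peripheral base into the SIM register macros, so board code no longer adds MCF_IPSBAR by hand. A minimal sketch of the consumer-side pattern follows; the function name is made up for illustration, and the header choice assumes <asm/mcfsim.h> pulls in the per-CPU SIM definitions as elsewhere in this patch.]

#include <linux/io.h>
#include <asm/mcfsim.h>

/* Route the UART0 pins by writing the (now absolute) PAR register. */
static void example_enable_uart0_pins(void)
{
	u16 par = readw(MCF_GPIO_PAR_UART);	/* macro already includes the base */

	par |= MCF_GPIO_PAR_UART_PAR_UTXD0 | MCF_GPIO_PAR_UART_PAR_URXD0;
	writew(par, MCF_GPIO_PAR_UART);
}

[This mirrors the simplification visible later in the 520x platform config hunks, where the explicit MCF_IPSBAR additions are dropped from the readw/writew calls.]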
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h index 4ad7a00257a8..8996df62ede4 100644 --- a/arch/m68k/include/asm/m523xsim.h +++ b/arch/m68k/include/asm/m523xsim.h @@ -13,14 +13,16 @@ #define CPU_NAME "COLDFIRE(m523x)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m52xxacr.h> /* * Define the 523x SIM register set addresses. */ -#define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ -#define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 0 */ + #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ @@ -39,11 +41,11 @@ /* * SDRAM configuration registers. */ -#define MCFSIM_DCR 0x44 /* SDRAM control */ -#define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ -#define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ -#define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ -#define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ +#define MCFSIM_DCR (MCF_IPSBAR + 0x44) /* Control */ +#define MCFSIM_DACR0 (MCF_IPSBAR + 0x48) /* Base address 0 */ +#define MCFSIM_DMR0 (MCF_IPSBAR + 0x4c) /* Address mask 0 */ +#define MCFSIM_DACR1 (MCF_IPSBAR + 0x50) /* Base address 1 */ +#define MCFSIM_DMR1 (MCF_IPSBAR + 0x54) /* Address mask 1 */ /* * Reset Controll Unit (relative to IPSBAR). @@ -57,10 +59,19 @@ /* * UART module. */ -#define MCFUART_BASE1 0x200 /* Base address of UART1 */ -#define MCFUART_BASE2 0x240 /* Base address of UART2 */ -#define MCFUART_BASE3 0x280 /* Base address of UART3 */ +#define MCFUART_BASE1 (MCF_IPSBAR + 0x200) +#define MCFUART_BASE2 (MCF_IPSBAR + 0x240) +#define MCFUART_BASE3 (MCF_IPSBAR + 0x280) + +/* + * FEC ethernet module. + */ +#define MCFFEC_BASE (MCF_IPSBAR + 0x1000) +#define MCFFEC_SIZE 0x800 +/* + * GPIO module. + */ #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) #define MCFGPIO_PODR_DATAH (MCF_IPSBAR + 0x100001) #define MCFGPIO_PODR_DATAL (MCF_IPSBAR + 0x100002) @@ -118,12 +129,22 @@ #define MCFGPIO_PCLRR_ETPU (MCF_IPSBAR + 0x10003C) /* - * EPort + * PIT timer base addresses. */ +#define MCFPIT_BASE1 (MCF_IPSBAR + 0x150000) +#define MCFPIT_BASE2 (MCF_IPSBAR + 0x160000) +#define MCFPIT_BASE3 (MCF_IPSBAR + 0x170000) +#define MCFPIT_BASE4 (MCF_IPSBAR + 0x180000) +/* + * EPort + */ +#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000) #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) +#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003) #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) +#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006) /* * Generic GPIO support @@ -143,5 +164,14 @@ */ #define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A) #define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C) + +/* + * DMA unit base addresses. 
+ */ +#define MCFDMA_BASE0 (MCF_IPSBAR + 0x100) +#define MCFDMA_BASE1 (MCF_IPSBAR + 0x140) +#define MCFDMA_BASE2 (MCF_IPSBAR + 0x180) +#define MCFDMA_BASE3 (MCF_IPSBAR + 0x1C0) + /****************************************************************************/ #endif /* m523xsim_h */ diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h index 4908b118f2fd..805714ca8d7d 100644 --- a/arch/m68k/include/asm/m5249sim.h +++ b/arch/m68k/include/asm/m5249sim.h @@ -13,10 +13,16 @@ #define CPU_NAME "COLDFIRE(m5249)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m52xxacr.h> /* + * The 5249 has a second MBAR region, define its address. + */ +#define MCF_MBAR2 0x80000000 + +/* * Define the 5249 SIM register set addresses. */ #define MCFSIM_RSR 0x00 /* Reset Status reg (r/w) */ @@ -55,11 +61,17 @@ #define MCFSIM_CSMR3 0xa8 /* CS 3 Mask reg (r/w) */ #define MCFSIM_CSCR3 0xae /* CS 3 Control reg (r/w) */ -#define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ -#define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ -#define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ +#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ +#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */ +#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */ +#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */ +#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */ + +/* + * Timer module. + */ +#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ +#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ /* * UART module. @@ -68,6 +80,14 @@ #define MCFUART_BASE2 0x200 /* Base address of UART2 */ /* + * DMA unit base addresses. + */ +#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ +#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ +#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ +#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ + +/* * Some symbol defines for the above... */ #define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */ diff --git a/arch/m68k/include/asm/m5272sim.h b/arch/m68k/include/asm/m5272sim.h index b7cc50abc831..759c2b07a994 100644 --- a/arch/m68k/include/asm/m5272sim.h +++ b/arch/m68k/include/asm/m5272sim.h @@ -14,6 +14,7 @@ #define CPU_NAME "COLDFIRE(m5272)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK MCF_CLK #include <asm/m52xxacr.h> @@ -80,6 +81,13 @@ #define MCFSIM_PCDAT (MCF_MBAR + 0x96) /* Port C Data (r/w) */ #define MCFSIM_PDCNT (MCF_MBAR + 0x98) /* Port D Control (r/w) */ +#define MCFDMA_BASE0 (MCF_MBAR + 0xe0) /* Base address DMA 0 */ + +#define MCFTIMER_BASE1 (MCF_MBAR + 0x200) /* Base address TIMER1 */ +#define MCFTIMER_BASE2 (MCF_MBAR + 0x220) /* Base address TIMER2 */ +#define MCFTIMER_BASE3 (MCF_MBAR + 0x240) /* Base address TIMER4 */ +#define MCFTIMER_BASE4 (MCF_MBAR + 0x260) /* Base address TIMER3 */ + /* * Define system peripheral IRQ usage. */ diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h index e8042e8bc003..74855a66c050 100644 --- a/arch/m68k/include/asm/m527xsim.h +++ b/arch/m68k/include/asm/m527xsim.h @@ -13,14 +13,16 @@ #define CPU_NAME "COLDFIRE(m527x)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m52xxacr.h> /* * Define the 5270/5271 SIM register set addresses. 
*/ -#define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ -#define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 1 */ +#define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 1 */ + #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ @@ -42,29 +44,45 @@ * SDRAM configuration registers. */ #ifdef CONFIG_M5271 -#define MCFSIM_DCR 0x40 /* SDRAM control */ -#define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ -#define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ -#define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ -#define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ +#define MCFSIM_DCR (MCF_IPSBAR + 0x40) /* Control */ +#define MCFSIM_DACR0 (MCF_IPSBAR + 0x48) /* Base address 0 */ +#define MCFSIM_DMR0 (MCF_IPSBAR + 0x4c) /* Address mask 0 */ +#define MCFSIM_DACR1 (MCF_IPSBAR + 0x50) /* Base address 1 */ +#define MCFSIM_DMR1 (MCF_IPSBAR + 0x54) /* Address mask 1 */ #endif #ifdef CONFIG_M5275 -#define MCFSIM_DMR 0x40 /* SDRAM mode */ -#define MCFSIM_DCR 0x44 /* SDRAM control */ -#define MCFSIM_DCFG1 0x48 /* SDRAM configuration 1 */ -#define MCFSIM_DCFG2 0x4c /* SDRAM configuration 2 */ -#define MCFSIM_DBAR0 0x50 /* SDRAM base address 0 */ -#define MCFSIM_DMR0 0x54 /* SDRAM address mask 0 */ -#define MCFSIM_DBAR1 0x58 /* SDRAM base address 1 */ -#define MCFSIM_DMR1 0x5c /* SDRAM address mask 1 */ +#define MCFSIM_DMR (MCF_IPSBAR + 0x40) /* Mode */ +#define MCFSIM_DCR (MCF_IPSBAR + 0x44) /* Control */ +#define MCFSIM_DCFG1 (MCF_IPSBAR + 0x48) /* Configuration 1 */ +#define MCFSIM_DCFG2 (MCF_IPSBAR + 0x4c) /* Configuration 2 */ +#define MCFSIM_DBAR0 (MCF_IPSBAR + 0x50) /* Base address 0 */ +#define MCFSIM_DMR0 (MCF_IPSBAR + 0x54) /* Address mask 0 */ +#define MCFSIM_DBAR1 (MCF_IPSBAR + 0x58) /* Base address 1 */ +#define MCFSIM_DMR1 (MCF_IPSBAR + 0x5c) /* Address mask 1 */ #endif /* + * DMA unit base addresses. + */ +#define MCFDMA_BASE0 (MCF_IPSBAR + 0x100) +#define MCFDMA_BASE1 (MCF_IPSBAR + 0x140) +#define MCFDMA_BASE2 (MCF_IPSBAR + 0x180) +#define MCFDMA_BASE3 (MCF_IPSBAR + 0x1C0) + +/* * UART module. */ -#define MCFUART_BASE1 0x200 /* Base address of UART1 */ -#define MCFUART_BASE2 0x240 /* Base address of UART2 */ -#define MCFUART_BASE3 0x280 /* Base address of UART3 */ +#define MCFUART_BASE1 (MCF_IPSBAR + 0x200) +#define MCFUART_BASE2 (MCF_IPSBAR + 0x240) +#define MCFUART_BASE3 (MCF_IPSBAR + 0x280) + +/* + * FEC ethernet module. + */ +#define MCFFEC_BASE0 (MCF_IPSBAR + 0x1000) +#define MCFFEC_SIZE0 0x800 +#define MCFFEC_BASE1 (MCF_IPSBAR + 0x1800) +#define MCFFEC_SIZE1 0x800 #ifdef CONFIG_M5271 #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) @@ -231,14 +249,22 @@ #endif /* - * EPort + * PIT timer base addresses. */ +#define MCFPIT_BASE1 (MCF_IPSBAR + 0x150000) +#define MCFPIT_BASE2 (MCF_IPSBAR + 0x160000) +#define MCFPIT_BASE3 (MCF_IPSBAR + 0x170000) +#define MCFPIT_BASE4 (MCF_IPSBAR + 0x180000) +/* + * EPort + */ +#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000) #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) +#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003) #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) - - +#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006) /* * GPIO pins setups to enable the UARTs. 
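[Editorial note, not part of the patch: the new MCFFEC_BASE0/MCFFEC_SIZE0 macros above already include MCF_IPSBAR, so an m527x FEC memory resource can be declared without open-coded base-address arithmetic, in the same style the patch uses for the m520x and m523x boards further down. The array name below is hypothetical.]

#include <linux/ioport.h>
#include <asm/mcfsim.h>

/* Memory window for the first FEC controller, built from the new macros. */
static struct resource example_m527x_fec0_resources[] = {
	{
		.start	= MCFFEC_BASE0,
		.end	= MCFFEC_BASE0 + MCFFEC_SIZE0 - 1,
		.flags	= IORESOURCE_MEM,
	},
};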
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index a6d2f4d9aaa0..d798bd5df56c 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h @@ -13,14 +13,16 @@ #define CPU_NAME "COLDFIRE(m528x)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK MCF_CLK #include <asm/m52xxacr.h> /* * Define the 5280/5282 SIM register set addresses. */ -#define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ -#define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 0 */ + #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ @@ -39,18 +41,32 @@ /* * SDRAM configuration registers. */ -#define MCFSIM_DCR 0x44 /* SDRAM control */ -#define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ -#define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ -#define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ -#define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ +#define MCFSIM_DCR (MCF_IPSBAR + 0x00000044) /* Control */ +#define MCFSIM_DACR0 (MCF_IPSBAR + 0x00000048) /* Base address 0 */ +#define MCFSIM_DMR0 (MCF_IPSBAR + 0x0000004c) /* Address mask 0 */ +#define MCFSIM_DACR1 (MCF_IPSBAR + 0x00000050) /* Base address 1 */ +#define MCFSIM_DMR1 (MCF_IPSBAR + 0x00000054) /* Address mask 1 */ + +/* + * DMA unit base addresses. + */ +#define MCFDMA_BASE0 (MCF_IPSBAR + 0x00000100) +#define MCFDMA_BASE1 (MCF_IPSBAR + 0x00000140) +#define MCFDMA_BASE2 (MCF_IPSBAR + 0x00000180) +#define MCFDMA_BASE3 (MCF_IPSBAR + 0x000001C0) /* * UART module. */ -#define MCFUART_BASE1 0x200 /* Base address of UART1 */ -#define MCFUART_BASE2 0x240 /* Base address of UART2 */ -#define MCFUART_BASE3 0x280 /* Base address of UART3 */ +#define MCFUART_BASE1 (MCF_IPSBAR + 0x00000200) +#define MCFUART_BASE2 (MCF_IPSBAR + 0x00000240) +#define MCFUART_BASE3 (MCF_IPSBAR + 0x00000280) + +/* + * FEC ethernet module. + */ +#define MCFFEC_BASE (MCF_IPSBAR + 0x00001000) +#define MCFFEC_SIZE 0x800 /* * GPIO registers @@ -163,6 +179,14 @@ #define MCFGPIO_PUAPAR (MCF_IPSBAR + 0x0010005C) /* + * PIT timer base addresses. 
+ */ +#define MCFPIT_BASE1 (MCF_IPSBAR + 0x00150000) +#define MCFPIT_BASE2 (MCF_IPSBAR + 0x00160000) +#define MCFPIT_BASE3 (MCF_IPSBAR + 0x00170000) +#define MCFPIT_BASE4 (MCF_IPSBAR + 0x00180000) + +/* * Edge Port registers */ #define MCFEPORT_EPPAR (MCF_IPSBAR + 0x00130000) diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h index 0bf57397e7a9..4c94c01f36c4 100644 --- a/arch/m68k/include/asm/m5307sim.h +++ b/arch/m68k/include/asm/m5307sim.h @@ -16,6 +16,7 @@ #define CPU_NAME "COLDFIRE(m5307)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m53xxacr.h> @@ -89,16 +90,30 @@ #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ #endif /* CONFIG_OLDMASK */ -#define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ -#define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ -#define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ +#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ +#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM Addr/Ctrl 0 */ +#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM Mask 0 */ +#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM Addr/Ctrl 1 */ +#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM Mask 1 */ + +/* + * Timer module. + */ +#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ +#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ #define MCFSIM_PADDR (MCF_MBAR + 0x244) #define MCFSIM_PADAT (MCF_MBAR + 0x248) /* + * DMA unit base addresses. + */ +#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ +#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ +#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ +#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ + +/* * UART module. */ #if defined(CONFIG_NETtel) || defined(CONFIG_SECUREEDGEMP3) diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h index e6470f8ca324..ba4cc784f574 100644 --- a/arch/m68k/include/asm/m532xsim.h +++ b/arch/m68k/include/asm/m532xsim.h @@ -11,6 +11,7 @@ #define CPU_NAME "COLDFIRE(m532x)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 3) #include <asm/m53xxacr.h> @@ -85,6 +86,14 @@ #define MCFUART_BASE2 0xFC064000 /* Base address of UART2 */ #define MCFUART_BASE3 0xFC068000 /* Base address of UART3 */ +/* + * Timer module. 
+ */ +#define MCFTIMER_BASE1 0xFC070000 /* Base address of TIMER1 */ +#define MCFTIMER_BASE2 0xFC074000 /* Base address of TIMER2 */ +#define MCFTIMER_BASE3 0xFC078000 /* Base address of TIMER3 */ +#define MCFTIMER_BASE4 0xFC07C000 /* Base address of TIMER4 */ + /********************************************************************* * * Reset Controller Module diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h index 75f5c28a551d..762c58c89050 100644 --- a/arch/m68k/include/asm/m5407sim.h +++ b/arch/m68k/include/asm/m5407sim.h @@ -16,6 +16,7 @@ #define CPU_NAME "COLDFIRE(m5407)" #define CPU_INSTR_PER_JIFFY 3 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m54xxacr.h> @@ -72,11 +73,17 @@ #define MCFSIM_CSMR7 0xd8 /* CS 7 Mask reg (r/w) */ #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ -#define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ -#define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ -#define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ -#define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ +#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ +#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */ +#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */ +#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */ +#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */ + +/* + * Timer module. + */ +#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ +#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ #define MCFUART_BASE1 0x1c0 /* Base address of UART1 */ #define MCFUART_BASE2 0x200 /* Base address of UART2 */ @@ -85,6 +92,14 @@ #define MCFSIM_PADAT (MCF_MBAR + 0x248) /* + * DMA unit base addresses. + */ +#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ +#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ +#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ +#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ + +/* * Generic GPIO support */ #define MCFGPIO_PIN_MAX 16 diff --git a/arch/m68k/include/asm/m54xxsim.h b/arch/m68k/include/asm/m54xxsim.h index 462ae5328441..1ed8bfb02772 100644 --- a/arch/m68k/include/asm/m54xxsim.h +++ b/arch/m68k/include/asm/m54xxsim.h @@ -7,6 +7,7 @@ #define CPU_NAME "COLDFIRE(m54xx)" #define CPU_INSTR_PER_JIFFY 2 +#define MCF_BUSCLK (MCF_CLK / 2) #include <asm/m54xxacr.h> @@ -15,7 +16,8 @@ /* * Interrupt Controller Registers */ -#define MCFICM_INTC0 0x0700 /* Base for Interrupt Ctrl 0 */ +#define MCFICM_INTC0 (MCF_MBAR + 0x700) /* Base for Interrupt Ctrl 0 */ + #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ @@ -48,6 +50,16 @@ #define MCFGPIO_IRQ_VECBASE -1 /* + * EDGE Port support. 
+ */ +#define MCFEPORT_EPPAR (MCF_MBAR + 0xf00) /* Pin assignment */ +#define MCFEPORT_EPDDR (MCF_MBAR + 0xf04) /* Data direction */ +#define MCFEPORT_EPIER (MCF_MBAR + 0xf05) /* Interrupt enable */ +#define MCFEPORT_EPDR (MCF_MBAR + 0xf08) /* Port data (w) */ +#define MCFEPORT_EPPDR (MCF_MBAR + 0xf09) /* Port data (r) */ +#define MCFEPORT_EPFR (MCF_MBAR + 0xf0c) /* Flags */ + +/* * Some PSC related definitions */ #define MCF_PAR_PSC(x) (0x000A4F-((x)&0x3)) diff --git a/arch/m68k/include/asm/mcfdma.h b/arch/m68k/include/asm/mcfdma.h index 705c52c79cd8..10bc7e391c14 100644 --- a/arch/m68k/include/asm/mcfdma.h +++ b/arch/m68k/include/asm/mcfdma.h @@ -11,29 +11,6 @@ #define mcfdma_h /****************************************************************************/ - -/* - * Get address specific defines for this Coldfire member. - */ -#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) -#define MCFDMA_BASE0 0x200 /* Base address of DMA 0 */ -#define MCFDMA_BASE1 0x240 /* Base address of DMA 1 */ -#elif defined(CONFIG_M5272) -#define MCFDMA_BASE0 0x0e0 /* Base address of DMA 0 */ -#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) -/* These are relative to the IPSBAR, not MBAR */ -#define MCFDMA_BASE0 0x100 /* Base address of DMA 0 */ -#define MCFDMA_BASE1 0x140 /* Base address of DMA 1 */ -#define MCFDMA_BASE2 0x180 /* Base address of DMA 2 */ -#define MCFDMA_BASE3 0x1C0 /* Base address of DMA 3 */ -#elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) -#define MCFDMA_BASE0 0x300 /* Base address of DMA 0 */ -#define MCFDMA_BASE1 0x340 /* Base address of DMA 1 */ -#define MCFDMA_BASE2 0x380 /* Base address of DMA 2 */ -#define MCFDMA_BASE3 0x3C0 /* Base address of DMA 3 */ -#endif - - #if !defined(CONFIG_M5272) /* diff --git a/arch/m68k/include/asm/mcfpit.h b/arch/m68k/include/asm/mcfpit.h index f570cf64fd29..9fd321ca0725 100644 --- a/arch/m68k/include/asm/mcfpit.h +++ b/arch/m68k/include/asm/mcfpit.h @@ -11,22 +11,8 @@ #define mcfpit_h /****************************************************************************/ - -/* - * Get address specific defines for the 5270/5271, 5280/5282, and 5208. - */ -#if defined(CONFIG_M520x) -#define MCFPIT_BASE1 0x00080000 /* Base address of TIMER1 */ -#define MCFPIT_BASE2 0x00084000 /* Base address of TIMER2 */ -#else -#define MCFPIT_BASE1 0x00150000 /* Base address of TIMER1 */ -#define MCFPIT_BASE2 0x00160000 /* Base address of TIMER2 */ -#define MCFPIT_BASE3 0x00170000 /* Base address of TIMER3 */ -#define MCFPIT_BASE4 0x00180000 /* Base address of TIMER4 */ -#endif - /* - * Define the PIT timer register set addresses. + * Define the PIT timer register address offsets. */ #define MCFPIT_PCSR 0x0 /* PIT control register */ #define MCFPIT_PMR 0x2 /* PIT modulus register */ diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h index 0f90f6d2227a..92b276fe8240 100644 --- a/arch/m68k/include/asm/mcftimer.h +++ b/arch/m68k/include/asm/mcftimer.h @@ -12,29 +12,6 @@ #define mcftimer_h /****************************************************************************/ - -/* - * Get address specific defines for this ColdFire member. 
- */ -#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) -#define MCFTIMER_BASE1 0x100 /* Base address of TIMER1 */ -#define MCFTIMER_BASE2 0x120 /* Base address of TIMER2 */ -#elif defined(CONFIG_M5272) -#define MCFTIMER_BASE1 0x200 /* Base address of TIMER1 */ -#define MCFTIMER_BASE2 0x220 /* Base address of TIMER2 */ -#define MCFTIMER_BASE3 0x240 /* Base address of TIMER4 */ -#define MCFTIMER_BASE4 0x260 /* Base address of TIMER3 */ -#elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) -#define MCFTIMER_BASE1 0x140 /* Base address of TIMER1 */ -#define MCFTIMER_BASE2 0x180 /* Base address of TIMER2 */ -#elif defined(CONFIG_M532x) -#define MCFTIMER_BASE1 0xfc070000 /* Base address of TIMER1 */ -#define MCFTIMER_BASE2 0xfc074000 /* Base address of TIMER2 */ -#define MCFTIMER_BASE3 0xfc078000 /* Base address of TIMER3 */ -#define MCFTIMER_BASE4 0xfc07c000 /* Base address of TIMER4 */ -#endif - - /* * Define the TIMER register set addresses. */ diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 06438dac08ff..18b34ee5db3b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dummy) { - do_timer(1); + xtime_update(1); update_process_times(user_mode(get_irq_regs())); profile_tick(CPU_PROFILING); diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 100baaa692a1..6cb9c3a9b6c9 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -46,8 +46,8 @@ extern void mvme147_reset (void); static int bcd2int (unsigned char b); -/* Save tick handler routine pointer, will point to do_timer() in - * kernel/sched.c, called via mvme147_process_int() */ +/* Save tick handler routine pointer, will point to xtime_update() in + * kernel/time/timekeeping.c, called via mvme147_process_int() */ irq_handler_t tick_handler; diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 11edf61cc2c4..0b28e2621653 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -51,8 +51,8 @@ extern void mvme16x_reset (void); int bcd2int (unsigned char b); -/* Save tick handler routine pointer, will point to do_timer() in - * kernel/sched.c, called via mvme16x_process_int() */ +/* Save tick handler routine pointer, will point to xtime_update() in + * kernel/time/timekeeping.c, called via mvme16x_process_int() */ static irq_handler_t tick_handler; diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c index 2d9e21bd313a..6464ad3ae3e6 100644 --- a/arch/m68k/sun3/sun3ints.c +++ b/arch/m68k/sun3/sun3ints.c @@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id) #ifdef CONFIG_SUN3 intersil_clear(); #endif - do_timer(1); + xtime_update(1); update_process_times(user_mode(get_irq_regs())); if (!(kstat_cpu(0).irqs[irq] % 20)) sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index 8b9dacaa0f6e..b5424cf948e6 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -3,6 +3,7 @@ config M68K default y select HAVE_IDE select HAVE_GENERIC_HARDIRQS + select GENERIC_HARDIRQS_NO_DEPRECATED config MMU bool @@ -78,6 +79,12 @@ config HAVE_CACHE_SPLIT config HAVE_CACHE_CB bool +config HAVE_MBAR + bool + 
+config HAVE_IPSBAR + bool + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -111,12 +118,14 @@ config M68360 config M5206 bool "MCF5206" select COLDFIRE_SW_A7 + select HAVE_MBAR help Motorola ColdFire 5206 processor support. config M5206e bool "MCF5206e" select COLDFIRE_SW_A7 + select HAVE_MBAR help Motorola ColdFire 5206e processor support. @@ -131,30 +140,35 @@ config M523x bool "MCF523x" select GENERIC_CLOCKEVENTS select HAVE_CACHE_SPLIT + select HAVE_IPSBAR help Freescale Coldfire 5230/1/2/4/5 processor support config M5249 bool "MCF5249" select COLDFIRE_SW_A7 + select HAVE_MBAR help Motorola ColdFire 5249 processor support. config M5271 bool "MCF5271" select HAVE_CACHE_SPLIT + select HAVE_IPSBAR help Freescale (Motorola) ColdFire 5270/5271 processor support. config M5272 bool "MCF5272" select COLDFIRE_SW_A7 + select HAVE_MBAR help Motorola ColdFire 5272 processor support. config M5275 bool "MCF5275" select HAVE_CACHE_SPLIT + select HAVE_IPSBAR help Freescale (Motorola) ColdFire 5274/5275 processor support. @@ -162,6 +176,7 @@ config M528x bool "MCF528x" select GENERIC_CLOCKEVENTS select HAVE_CACHE_SPLIT + select HAVE_IPSBAR help Motorola ColdFire 5280/5282 processor support. @@ -169,6 +184,7 @@ config M5307 bool "MCF5307" select COLDFIRE_SW_A7 select HAVE_CACHE_CB + select HAVE_MBAR help Motorola ColdFire 5307 processor support. @@ -182,18 +198,21 @@ config M5407 bool "MCF5407" select COLDFIRE_SW_A7 select HAVE_CACHE_CB + select HAVE_MBAR help Motorola ColdFire 5407 processor support. config M547x bool "MCF547x" select HAVE_CACHE_CB + select HAVE_MBAR help Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support. config M548x bool "MCF548x" select HAVE_CACHE_CB + select HAVE_MBAR help Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support. @@ -241,17 +260,6 @@ config CLOCK_FREQ if it is fitted (there are some exceptions). This value will be specific to the exact CPU that you are using. -config CLOCK_DIV - int "Set the core/bus clock divide ratio" - default "1" - depends on CLOCK_SET - help - On many SoC style CPUs the master CPU clock is also used to drive - on-chip peripherals. The clock that is distributed to these - peripherals is sometimes a fixed ratio of the master clock - frequency. If so then set this to the divider ratio of the - master clock to the peripheral clock. If not sure then select 1. - config OLDMASK bool "Old mask 5307 (1H55J) silicon" depends on M5307 @@ -500,6 +508,12 @@ config M5407C3 help Support for the Motorola M5407C3 board. +config FIREBEE + bool "FireBee board support" + depends on M547x + help + Support for the FireBee ColdFire 5475 based board. + config CLEOPATRA bool "Feith CLEOPATRA board support" depends on (M5307 || M5407) @@ -649,6 +663,28 @@ config VECTORBASE platforms this address is programmed into the VBR register, thus actually setting the address to use. +config MBAR + hex "Address of the MBAR (internal peripherals)" + default "0x10000000" + depends on HAVE_MBAR + help + Define the address of the internal system peripherals. This value + is set in the processors MBAR register. This is generally setup by + the boot loader, and will not be written by the kernel. By far most + ColdFire boards use the default 0x10000000 value, so if unsure then + use this. + +config IPSBAR + hex "Address of the IPSBAR (internal peripherals)" + default "0x40000000" + depends on HAVE_IPSBAR + help + Define the address of the internal system peripherals. This value + is set in the processors IPSBAR register. 
This is generally setup by + the boot loader, and will not be written by the kernel. By far most + ColdFire boards use the default 0x40000000 value, so if unsure then + use this. + config KERNELBASE hex "Address of the base of kernel code" default "0x400" diff --git a/arch/m68knommu/kernel/irq.c b/arch/m68knommu/kernel/irq.c index c9cac36d4422..c7dd48f37bee 100644 --- a/arch/m68knommu/kernel/irq.c +++ b/arch/m68knommu/kernel/irq.c @@ -38,11 +38,13 @@ int show_interrupts(struct seq_file *p, void *v) seq_puts(p, " CPU0\n"); if (irq < NR_IRQS) { - ap = irq_desc[irq].action; + struct irq_desc *desc = irq_to_desc(irq); + + ap = desc->action; if (ap) { seq_printf(p, "%3d: ", irq); seq_printf(p, "%10u ", kstat_irqs(irq)); - seq_printf(p, "%14s ", irq_desc[irq].chip->name); + seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name); seq_printf(p, "%s", ap->name); for (ap = ap->next; ap; ap = ap->next) diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d6ac2a43453c..6623909f70e6 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c @@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime) #ifndef CONFIG_GENERIC_CLOCKEVENTS /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ irqreturn_t arch_timer_interrupt(int irq, void *dummy) { @@ -44,11 +44,7 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy) if (current->pid) profile_tick(CPU_PROFILING); - write_seqlock(&xtime_lock); - - do_timer(1); - - write_sequnlock(&xtime_lock); + xtime_update(1); update_process_times(user_mode(get_irq_regs())); diff --git a/arch/m68knommu/platform/5206/gpio.c b/arch/m68knommu/platform/5206/gpio.c index 60f779ce1651..b9ab4a120f28 100644 --- a/arch/m68knommu/platform/5206/gpio.c +++ b/arch/m68knommu/platform/5206/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 8, }, - .pddr = MCFSIM_PADDR, - .podr = MCFSIM_PADAT, - .ppdr = MCFSIM_PADAT, + .pddr = (void __iomem *) MCFSIM_PADDR, + .podr = (void __iomem *) MCFSIM_PADAT, + .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; diff --git a/arch/m68knommu/platform/5206e/gpio.c b/arch/m68knommu/platform/5206e/gpio.c index 60f779ce1651..b9ab4a120f28 100644 --- a/arch/m68knommu/platform/5206e/gpio.c +++ b/arch/m68knommu/platform/5206e/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 8, }, - .pddr = MCFSIM_PADDR, - .podr = MCFSIM_PADAT, - .ppdr = MCFSIM_PADAT, + .pddr = (void __iomem *) MCFSIM_PADDR, + .podr = (void __iomem *) MCFSIM_PADAT, + .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c index 71d2ba474c63..621238f1a219 100644 --- a/arch/m68knommu/platform/520x/config.c +++ b/arch/m68knommu/platform/520x/config.c @@ -27,15 +27,15 @@ static struct mcf_platform_uart m520x_uart_platform[] = { { - .mapbase = MCF_MBAR + MCFUART_BASE1, + .mapbase = MCFUART_BASE1, .irq = MCFINT_VECBASE + MCFINT_UART0, }, { - .mapbase = MCF_MBAR + MCFUART_BASE2, + .mapbase = MCFUART_BASE2, .irq = MCFINT_VECBASE + MCFINT_UART1, }, { - .mapbase = MCF_MBAR + MCFUART_BASE3, + .mapbase = MCFUART_BASE3, .irq = MCFINT_VECBASE + MCFINT_UART2, }, { }, @@ -49,8 +49,8 @@ static struct platform_device m520x_uart = { static struct resource m520x_fec_resources[] = { { - .start = MCF_MBAR + 0x30000, - .end = MCF_MBAR + 
0x30000 + 0x7ff, + .start = MCFFEC_BASE, + .end = MCFFEC_BASE + MCFFEC_SIZE - 1, .flags = IORESOURCE_MEM, }, { @@ -208,11 +208,11 @@ static void __init m520x_qspi_init(void) { u16 par; /* setup Port QS for QSPI with gpio CS control */ - writeb(0x3f, MCF_IPSBAR + MCF_GPIO_PAR_QSPI); + writeb(0x3f, MCF_GPIO_PAR_QSPI); /* make U1CTS and U2RTS gpio for cs_control */ - par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); + par = readw(MCF_GPIO_PAR_UART); par &= 0x00ff; - writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); + writew(par, MCF_GPIO_PAR_UART); } #endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */ @@ -234,23 +234,23 @@ static void __init m520x_uart_init_line(int line, int irq) switch (line) { case 0: - par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); + par = readw(MCF_GPIO_PAR_UART); par |= MCF_GPIO_PAR_UART_PAR_UTXD0 | MCF_GPIO_PAR_UART_PAR_URXD0; - writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); + writew(par, MCF_GPIO_PAR_UART); break; case 1: - par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); + par = readw(MCF_GPIO_PAR_UART); par |= MCF_GPIO_PAR_UART_PAR_UTXD1 | MCF_GPIO_PAR_UART_PAR_URXD1; - writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); + writew(par, MCF_GPIO_PAR_UART); break; case 2: - par2 = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); + par2 = readb(MCF_GPIO_PAR_FECI2C); par2 &= ~0x0F; par2 |= MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 | MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2; - writeb(par2, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); + writeb(par2, MCF_GPIO_PAR_FECI2C); break; } } @@ -271,11 +271,11 @@ static void __init m520x_fec_init(void) u8 v; /* Set multi-function pins to ethernet mode */ - v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FEC); - writeb(v | 0xf0, MCF_IPSBAR + MCF_GPIO_PAR_FEC); + v = readb(MCF_GPIO_PAR_FEC); + writeb(v | 0xf0, MCF_GPIO_PAR_FEC); - v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); - writeb(v | 0x0f, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); + v = readb(MCF_GPIO_PAR_FECI2C); + writeb(v | 0x0f, MCF_GPIO_PAR_FECI2C); } /***************************************************************************/ diff --git a/arch/m68knommu/platform/520x/gpio.c b/arch/m68knommu/platform/520x/gpio.c index 15b5bb62a698..d757328563d1 100644 --- a/arch/m68knommu/platform/520x/gpio.c +++ b/arch/m68knommu/platform/520x/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 8, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *) MCFEPORT_EPDDR, + .podr = (void __iomem *) MCFEPORT_EPDR, + .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -48,11 +48,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 8, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BUSCTL, - .podr = MCFGPIO_PODR_BUSCTL, - .ppdr = MCFGPIO_PPDSDR_BUSCTL, - .setr = MCFGPIO_PPDSDR_BUSCTL, - .clrr = MCFGPIO_PCLRR_BUSCTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, + .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { @@ -66,11 +66,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BE, - .podr = MCFGPIO_PODR_BE, - .ppdr = MCFGPIO_PPDSDR_BE, - .setr = MCFGPIO_PPDSDR_BE, - .clrr = MCFGPIO_PCLRR_BE, + .pddr = (void __iomem *) MCFGPIO_PDDR_BE, + .podr = (void __iomem *) MCFGPIO_PODR_BE, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BE, + .clrr = (void __iomem *) 
MCFGPIO_PCLRR_BE, }, { .gpio_chip = { @@ -84,11 +84,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 25, .ngpio = 3, }, - .pddr = MCFGPIO_PDDR_CS, - .podr = MCFGPIO_PODR_CS, - .ppdr = MCFGPIO_PPDSDR_CS, - .setr = MCFGPIO_PPDSDR_CS, - .clrr = MCFGPIO_PCLRR_CS, + .pddr = (void __iomem *) MCFGPIO_PDDR_CS, + .podr = (void __iomem *) MCFGPIO_PODR_CS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { @@ -102,11 +102,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_FECI2C, - .podr = MCFGPIO_PODR_FECI2C, - .ppdr = MCFGPIO_PPDSDR_FECI2C, - .setr = MCFGPIO_PPDSDR_FECI2C, - .clrr = MCFGPIO_PCLRR_FECI2C, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, + .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { @@ -120,11 +120,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 40, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_QSPI, - .podr = MCFGPIO_PODR_QSPI, - .ppdr = MCFGPIO_PPDSDR_QSPI, - .setr = MCFGPIO_PPDSDR_QSPI, - .clrr = MCFGPIO_PCLRR_QSPI, + .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, + .podr = (void __iomem *) MCFGPIO_PODR_QSPI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { @@ -138,11 +138,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 48, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_TIMER, - .podr = MCFGPIO_PODR_TIMER, - .ppdr = MCFGPIO_PPDSDR_TIMER, - .setr = MCFGPIO_PPDSDR_TIMER, - .clrr = MCFGPIO_PCLRR_TIMER, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, + .podr = (void __iomem *) MCFGPIO_PODR_TIMER, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, }, { .gpio_chip = { @@ -156,11 +156,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 56, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_UART, - .podr = MCFGPIO_PODR_UART, - .ppdr = MCFGPIO_PPDSDR_UART, - .setr = MCFGPIO_PPDSDR_UART, - .clrr = MCFGPIO_PCLRR_UART, + .pddr = (void __iomem *) MCFGPIO_PDDR_UART, + .podr = (void __iomem *) MCFGPIO_PODR_UART, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UART, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UART, }, { .gpio_chip = { @@ -174,11 +174,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FECH, - .podr = MCFGPIO_PODR_FECH, - .ppdr = MCFGPIO_PPDSDR_FECH, - .setr = MCFGPIO_PPDSDR_FECH, - .clrr = MCFGPIO_PCLRR_FECH, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECH, + .podr = (void __iomem *) MCFGPIO_PODR_FECH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECH, }, { .gpio_chip = { @@ -192,11 +192,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FECL, - .podr = MCFGPIO_PODR_FECL, - .ppdr = MCFGPIO_PPDSDR_FECL, - .setr = MCFGPIO_PPDSDR_FECL, - .clrr = MCFGPIO_PCLRR_FECL, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECL, + .podr = (void __iomem *) MCFGPIO_PODR_FECL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECL, }, }; 
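The gpio.c hunks above and below repeat one mechanical change: the ColdFire port register macros (MCFSIM_*, MCFEPORT_*, MCFGPIO_*) are plain integer addresses, while the mcf_gpio_chip register fields they initialize are evidently typed void __iomem *, so every initializer gains an explicit (void __iomem *) cast. The companion config.c hunks drop the MCF_MBAR/MCF_IPSBAR offsets from the UART and FEC resources, which suggests the MCFUART_BASEn and MCFFEC_BASE* macros now carry the full peripheral address themselves. Below is a minimal sketch of the cast pattern only; the structure name, field set and register addresses are made-up stand-ins, not the kernel's real mcf_gpio_chip or MCFSIM macros.

/*
 * Illustrative sketch, not part of the patch: example_gpio_regs,
 * EXAMPLE_PADDR and EXAMPLE_PADAT are hypothetical stand-ins.
 */
#include <linux/types.h>
#include <linux/io.h>

struct example_gpio_regs {
	void __iomem *pddr;		/* data direction register */
	void __iomem *podr;		/* output data register */
	void __iomem *ppdr;		/* pin (input) data register */
};

#define EXAMPLE_PADDR	0xfc0a4000	/* hypothetical register address */
#define EXAMPLE_PADAT	0xfc0a4004	/* hypothetical register address */

static struct example_gpio_regs example_port = {
	/* before: .pddr = EXAMPLE_PADDR,  (integer assigned to a pointer) */
	.pddr = (void __iomem *) EXAMPLE_PADDR,
	.podr = (void __iomem *) EXAMPLE_PADAT,
	.ppdr = (void __iomem *) EXAMPLE_PADAT,
};

static u8 example_read_direction(void)
{
	/* the __iomem-typed field feeds straight into the MMIO accessors */
	return readb(example_port.pddr);
}

The cast keeps the tables as compile-time constant initializers while giving the compiler (and sparse) the correct address-space annotation for the readb()/writeb()-style accessors used elsewhere in these files.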
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c index 8980f6d7715a..418a76feb1e3 100644 --- a/arch/m68knommu/platform/523x/config.c +++ b/arch/m68knommu/platform/523x/config.c @@ -28,15 +28,15 @@ static struct mcf_platform_uart m523x_uart_platform[] = { { - .mapbase = MCF_MBAR + MCFUART_BASE1, + .mapbase = MCFUART_BASE1, .irq = MCFINT_VECBASE + MCFINT_UART0, }, { - .mapbase = MCF_MBAR + MCFUART_BASE2, + .mapbase = MCFUART_BASE2, .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, }, { - .mapbase = MCF_MBAR + MCFUART_BASE3, + .mapbase = MCFUART_BASE3, .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, }, { }, @@ -50,8 +50,8 @@ static struct platform_device m523x_uart = { static struct resource m523x_fec_resources[] = { { - .start = MCF_MBAR + 0x1000, - .end = MCF_MBAR + 0x1000 + 0x7ff, + .start = MCFFEC_BASE, + .end = MCFFEC_BASE + MCFFEC_SIZE - 1, .flags = IORESOURCE_MEM, }, { diff --git a/arch/m68knommu/platform/523x/gpio.c b/arch/m68knommu/platform/523x/gpio.c index a8842dc27839..327ebf142c8e 100644 --- a/arch/m68knommu/platform/523x/gpio.c +++ b/arch/m68knommu/platform/523x/gpio.c @@ -33,9 +33,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 1, .ngpio = 7, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *) MCFEPORT_EPDDR, + .podr = (void __iomem *) MCFEPORT_EPDR, + .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -49,11 +49,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 13, .ngpio = 3, }, - .pddr = MCFGPIO_PDDR_ADDR, - .podr = MCFGPIO_PODR_ADDR, - .ppdr = MCFGPIO_PPDSDR_ADDR, - .setr = MCFGPIO_PPDSDR_ADDR, - .clrr = MCFGPIO_PCLRR_ADDR, + .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, + .podr = (void __iomem *) MCFGPIO_PODR_ADDR, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, }, { .gpio_chip = { @@ -67,11 +67,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_DATAH, - .podr = MCFGPIO_PODR_DATAH, - .ppdr = MCFGPIO_PPDSDR_DATAH, - .setr = MCFGPIO_PPDSDR_DATAH, - .clrr = MCFGPIO_PCLRR_DATAH, + .pddr = (void __iomem *) MCFGPIO_PDDR_DATAH, + .podr = (void __iomem *) MCFGPIO_PODR_DATAH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAH, }, { .gpio_chip = { @@ -85,11 +85,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 24, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_DATAL, - .podr = MCFGPIO_PODR_DATAL, - .ppdr = MCFGPIO_PPDSDR_DATAL, - .setr = MCFGPIO_PPDSDR_DATAL, - .clrr = MCFGPIO_PCLRR_DATAL, + .pddr = (void __iomem *) MCFGPIO_PDDR_DATAL, + .podr = (void __iomem *) MCFGPIO_PODR_DATAL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAL, }, { .gpio_chip = { @@ -103,11 +103,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_BUSCTL, - .podr = MCFGPIO_PODR_BUSCTL, - .ppdr = MCFGPIO_PPDSDR_BUSCTL, - .setr = MCFGPIO_PPDSDR_BUSCTL, - .clrr = MCFGPIO_PCLRR_BUSCTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, + .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { @@ -121,11 +121,11 @@ static struct mcf_gpio_chip 
mcf_gpio_chips[] = { .base = 40, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BS, - .podr = MCFGPIO_PODR_BS, - .ppdr = MCFGPIO_PPDSDR_BS, - .setr = MCFGPIO_PPDSDR_BS, - .clrr = MCFGPIO_PCLRR_BS, + .pddr = (void __iomem *) MCFGPIO_PDDR_BS, + .podr = (void __iomem *) MCFGPIO_PODR_BS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, }, { .gpio_chip = { @@ -139,11 +139,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 49, .ngpio = 7, }, - .pddr = MCFGPIO_PDDR_CS, - .podr = MCFGPIO_PODR_CS, - .ppdr = MCFGPIO_PPDSDR_CS, - .setr = MCFGPIO_PPDSDR_CS, - .clrr = MCFGPIO_PCLRR_CS, + .pddr = (void __iomem *) MCFGPIO_PDDR_CS, + .podr = (void __iomem *) MCFGPIO_PODR_CS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { @@ -157,11 +157,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 56, .ngpio = 6, }, - .pddr = MCFGPIO_PDDR_SDRAM, - .podr = MCFGPIO_PODR_SDRAM, - .ppdr = MCFGPIO_PPDSDR_SDRAM, - .setr = MCFGPIO_PPDSDR_SDRAM, - .clrr = MCFGPIO_PCLRR_SDRAM, + .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, + .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, }, { .gpio_chip = { @@ -175,11 +175,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_FECI2C, - .podr = MCFGPIO_PODR_FECI2C, - .ppdr = MCFGPIO_PPDSDR_FECI2C, - .setr = MCFGPIO_PPDSDR_FECI2C, - .clrr = MCFGPIO_PCLRR_FECI2C, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, + .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { @@ -193,11 +193,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 2, }, - .pddr = MCFGPIO_PDDR_UARTH, - .podr = MCFGPIO_PODR_UARTH, - .ppdr = MCFGPIO_PPDSDR_UARTH, - .setr = MCFGPIO_PPDSDR_UARTH, - .clrr = MCFGPIO_PCLRR_UARTH, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, + .podr = (void __iomem *) MCFGPIO_PODR_UARTH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, }, { .gpio_chip = { @@ -211,11 +211,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 80, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_UARTL, - .podr = MCFGPIO_PODR_UARTL, - .ppdr = MCFGPIO_PPDSDR_UARTL, - .setr = MCFGPIO_PPDSDR_UARTL, - .clrr = MCFGPIO_PCLRR_UARTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, + .podr = (void __iomem *) MCFGPIO_PODR_UARTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, }, { .gpio_chip = { @@ -229,11 +229,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 88, .ngpio = 5, }, - .pddr = MCFGPIO_PDDR_QSPI, - .podr = MCFGPIO_PODR_QSPI, - .ppdr = MCFGPIO_PPDSDR_QSPI, - .setr = MCFGPIO_PPDSDR_QSPI, - .clrr = MCFGPIO_PCLRR_QSPI, + .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, + .podr = (void __iomem *) MCFGPIO_PODR_QSPI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { @@ -247,11 +247,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { 
.base = 96, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_TIMER, - .podr = MCFGPIO_PODR_TIMER, - .ppdr = MCFGPIO_PPDSDR_TIMER, - .setr = MCFGPIO_PPDSDR_TIMER, - .clrr = MCFGPIO_PCLRR_TIMER, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, + .podr = (void __iomem *) MCFGPIO_PODR_TIMER, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, }, { .gpio_chip = { @@ -265,11 +265,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 104, .ngpio = 3, }, - .pddr = MCFGPIO_PDDR_ETPU, - .podr = MCFGPIO_PODR_ETPU, - .ppdr = MCFGPIO_PPDSDR_ETPU, - .setr = MCFGPIO_PPDSDR_ETPU, - .clrr = MCFGPIO_PCLRR_ETPU, + .pddr = (void __iomem *) MCFGPIO_PDDR_ETPU, + .podr = (void __iomem *) MCFGPIO_PODR_ETPU, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, + .setr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, + .clrr = (void __iomem *) MCFGPIO_PCLRR_ETPU, }, }; diff --git a/arch/m68knommu/platform/5249/gpio.c b/arch/m68knommu/platform/5249/gpio.c index c611eab8b3b6..2b56c6ef65bf 100644 --- a/arch/m68knommu/platform/5249/gpio.c +++ b/arch/m68knommu/platform/5249/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 32, }, - .pddr = MCFSIM2_GPIOENABLE, - .podr = MCFSIM2_GPIOWRITE, - .ppdr = MCFSIM2_GPIOREAD, + .pddr = (void __iomem *) MCFSIM2_GPIOENABLE, + .podr = (void __iomem *) MCFSIM2_GPIOWRITE, + .ppdr = (void __iomem *) MCFSIM2_GPIOREAD, }, { .gpio_chip = { @@ -48,9 +48,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 32, }, - .pddr = MCFSIM2_GPIO1ENABLE, - .podr = MCFSIM2_GPIO1WRITE, - .ppdr = MCFSIM2_GPIO1READ, + .pddr = (void __iomem *) MCFSIM2_GPIO1ENABLE, + .podr = (void __iomem *) MCFSIM2_GPIO1WRITE, + .ppdr = (void __iomem *) MCFSIM2_GPIO1READ, }, }; diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c index c5151f846591..8f4b63e17366 100644 --- a/arch/m68knommu/platform/5249/intc2.c +++ b/arch/m68knommu/platform/5249/intc2.c @@ -17,32 +17,32 @@ #include <asm/coldfire.h> #include <asm/mcfsim.h> -static void intc2_irq_gpio_mask(unsigned int irq) +static void intc2_irq_gpio_mask(struct irq_data *d) { u32 imr; imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); - imr &= ~(0x1 << (irq - MCFINTC2_GPIOIRQ0)); + imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0)); writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); } -static void intc2_irq_gpio_unmask(unsigned int irq) +static void intc2_irq_gpio_unmask(struct irq_data *d) { u32 imr; imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); - imr |= (0x1 << (irq - MCFINTC2_GPIOIRQ0)); + imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0)); writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); } -static void intc2_irq_gpio_ack(unsigned int irq) +static void intc2_irq_gpio_ack(struct irq_data *d) { - writel(0x1 << (irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR); + writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR); } static struct irq_chip intc2_irq_gpio_chip = { .name = "CF-INTC2", - .mask = intc2_irq_gpio_mask, - .unmask = intc2_irq_gpio_unmask, - .ack = intc2_irq_gpio_ack, + .irq_mask = intc2_irq_gpio_mask, + .irq_unmask = intc2_irq_gpio_unmask, + .irq_ack = intc2_irq_gpio_ack, }; static int __init mcf_intc2_init(void) @@ -51,7 +51,7 @@ static int __init mcf_intc2_init(void) /* GPIO interrupt sources */ for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { - irq_desc[irq].chip = &intc2_irq_gpio_chip; + set_irq_chip(irq, &intc2_irq_gpio_chip); 
set_irq_handler(irq, handle_edge_irq); } diff --git a/arch/m68knommu/platform/5272/gpio.c b/arch/m68knommu/platform/5272/gpio.c index 459db89a89cc..57ac10a5d7f7 100644 --- a/arch/m68knommu/platform/5272/gpio.c +++ b/arch/m68knommu/platform/5272/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 16, }, - .pddr = MCFSIM_PADDR, - .podr = MCFSIM_PADAT, - .ppdr = MCFSIM_PADAT, + .pddr = (void __iomem *) MCFSIM_PADDR, + .podr = (void __iomem *) MCFSIM_PADAT, + .ppdr = (void __iomem *) MCFSIM_PADAT, }, { .gpio_chip = { @@ -48,9 +48,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 16, }, - .pddr = MCFSIM_PBDDR, - .podr = MCFSIM_PBDAT, - .ppdr = MCFSIM_PBDAT, + .pddr = (void __iomem *) MCFSIM_PBDDR, + .podr = (void __iomem *) MCFSIM_PBDAT, + .ppdr = (void __iomem *) MCFSIM_PBDAT, }, { .gpio_chip = { @@ -64,9 +64,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 16, }, - .pddr = MCFSIM_PCDDR, - .podr = MCFSIM_PCDAT, - .ppdr = MCFSIM_PCDAT, + .pddr = (void __iomem *) MCFSIM_PCDDR, + .podr = (void __iomem *) MCFSIM_PCDAT, + .ppdr = (void __iomem *) MCFSIM_PCDAT, }, }; diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c index 3cf681c177aa..969ff0a467c6 100644 --- a/arch/m68knommu/platform/5272/intc.c +++ b/arch/m68knommu/platform/5272/intc.c @@ -78,8 +78,10 @@ static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = { * an interrupt on this irq (for the external irqs). So this mask function * is also an ack_mask function. */ -static void intc_irq_mask(unsigned int irq) +static void intc_irq_mask(struct irq_data *d) { + unsigned int irq = d->irq; + if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { u32 v; irq -= MCFINT_VECBASE; @@ -88,8 +90,10 @@ static void intc_irq_mask(unsigned int irq) } } -static void intc_irq_unmask(unsigned int irq) +static void intc_irq_unmask(struct irq_data *d) { + unsigned int irq = d->irq; + if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { u32 v; irq -= MCFINT_VECBASE; @@ -98,8 +102,10 @@ static void intc_irq_unmask(unsigned int irq) } } -static void intc_irq_ack(unsigned int irq) +static void intc_irq_ack(struct irq_data *d) { + unsigned int irq = d->irq; + /* Only external interrupts are acked */ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { irq -= MCFINT_VECBASE; @@ -113,8 +119,10 @@ static void intc_irq_ack(unsigned int irq) } } -static int intc_irq_set_type(unsigned int irq, unsigned int type) +static int intc_irq_set_type(struct irq_data *d, unsigned int type) { + unsigned int irq = d->irq; + if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { irq -= MCFINT_VECBASE; if (intc_irqmap[irq].ack) { @@ -137,20 +145,17 @@ static int intc_irq_set_type(unsigned int irq, unsigned int type) */ static void intc_external_irq(unsigned int irq, struct irq_desc *desc) { - kstat_incr_irqs_this_cpu(irq, desc); - desc->status |= IRQ_INPROGRESS; - desc->chip->ack(irq); - handle_IRQ_event(irq, desc->action); - desc->status &= ~IRQ_INPROGRESS; + get_irq_desc_chip(desc)->irq_ack(&desc->irq_data); + handle_simple_irq(irq, desc); } static struct irq_chip intc_irq_chip = { .name = "CF-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, - .mask_ack = intc_irq_mask, - .ack = intc_irq_ack, - .set_type = intc_irq_set_type, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, + .irq_mask_ack = intc_irq_mask, + .irq_ack = intc_irq_ack, + .irq_set_type = intc_irq_set_type, }; void __init init_IRQ(void) 
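The interrupt-controller hunks here (5249/intc2.c, 5272/intc.c, and the 68328, 68360 and shared intc-2 conversions further down) all follow the same shape: the irq_chip callbacks move from the old prototypes taking a raw irq number (.mask/.unmask/.ack) to the irq_data-based ones (.irq_mask/.irq_unmask/.irq_ack) that read d->irq, and direct pokes at irq_desc[irq].chip are replaced by set_irq_chip(). A minimal sketch of that conversion follows; EXAMPLE_IMR and EXAMPLE_IRQBASE are hypothetical names standing in for the ColdFire-specific registers and vector bases, and the mask-register polarity (set bit = masked) is chosen for the example only.

/* Illustrative sketch, not part of the patch. */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>

#define EXAMPLE_IRQBASE	64				/* hypothetical first vector */
#define EXAMPLE_IMR	((void __iomem *) 0xfc048000)	/* hypothetical mask register */

/* New-style callback: receives struct irq_data, pulls the number from d->irq. */
static void example_irq_mask(struct irq_data *d)
{
	u32 imr = readl(EXAMPLE_IMR);

	imr |= 0x1 << (d->irq - EXAMPLE_IRQBASE);	/* set bit = masked */
	writel(imr, EXAMPLE_IMR);
}

static void example_irq_unmask(struct irq_data *d)
{
	u32 imr = readl(EXAMPLE_IMR);

	imr &= ~(0x1 << (d->irq - EXAMPLE_IRQBASE));
	writel(imr, EXAMPLE_IMR);
}

static struct irq_chip example_irq_chip = {
	.name		= "EXAMPLE-INTC",
	/* the old fields were .mask/.unmask taking "unsigned int irq" */
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};

static void __init example_intc_init(void)
{
	int irq;

	for (irq = EXAMPLE_IRQBASE; irq < EXAMPLE_IRQBASE + 8; irq++) {
		/* replaces writing irq_desc[irq].chip directly */
		set_irq_chip(irq, &example_irq_chip);
		set_irq_handler(irq, handle_level_irq);
	}
}

The handler side changes in the same spirit: rather than open-coding the ack and calling handle_IRQ_event(), the 5272 code above acks through get_irq_desc_chip(desc)->irq_ack(&desc->irq_data) and hands the rest to the generic handle_simple_irq() flow.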
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c index 3d9c35c98b98..fa359593b613 100644 --- a/arch/m68knommu/platform/527x/config.c +++ b/arch/m68knommu/platform/527x/config.c @@ -28,15 +28,15 @@ static struct mcf_platform_uart m527x_uart_platform[] = { { - .mapbase = MCF_MBAR + MCFUART_BASE1, + .mapbase = MCFUART_BASE1, .irq = MCFINT_VECBASE + MCFINT_UART0, }, { - .mapbase = MCF_MBAR + MCFUART_BASE2, + .mapbase = MCFUART_BASE2, .irq = MCFINT_VECBASE + MCFINT_UART1, }, { - .mapbase = MCF_MBAR + MCFUART_BASE3, + .mapbase = MCFUART_BASE3, .irq = MCFINT_VECBASE + MCFINT_UART2, }, { }, @@ -50,8 +50,8 @@ static struct platform_device m527x_uart = { static struct resource m527x_fec0_resources[] = { { - .start = MCF_MBAR + 0x1000, - .end = MCF_MBAR + 0x1000 + 0x7ff, + .start = MCFFEC_BASE0, + .end = MCFFEC_BASE0 + MCFFEC_SIZE0 - 1, .flags = IORESOURCE_MEM, }, { @@ -73,8 +73,8 @@ static struct resource m527x_fec0_resources[] = { static struct resource m527x_fec1_resources[] = { { - .start = MCF_MBAR + 0x1800, - .end = MCF_MBAR + 0x1800 + 0x7ff, + .start = MCFFEC_BASE1, + .end = MCFFEC_BASE1 + MCFFEC_SIZE1 - 1, .flags = IORESOURCE_MEM, }, { diff --git a/arch/m68knommu/platform/527x/gpio.c b/arch/m68knommu/platform/527x/gpio.c index 0b56e19db0f8..205da0aa0f2d 100644 --- a/arch/m68knommu/platform/527x/gpio.c +++ b/arch/m68knommu/platform/527x/gpio.c @@ -34,9 +34,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 1, .ngpio = 7, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *) MCFEPORT_EPDDR, + .podr = (void __iomem *) MCFEPORT_EPDR, + .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -50,11 +50,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 13, .ngpio = 3, }, - .pddr = MCFGPIO_PDDR_ADDR, - .podr = MCFGPIO_PODR_ADDR, - .ppdr = MCFGPIO_PPDSDR_ADDR, - .setr = MCFGPIO_PPDSDR_ADDR, - .clrr = MCFGPIO_PCLRR_ADDR, + .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, + .podr = (void __iomem *) MCFGPIO_PODR_ADDR, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, }, { .gpio_chip = { @@ -68,11 +68,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_DATAH, - .podr = MCFGPIO_PODR_DATAH, - .ppdr = MCFGPIO_PPDSDR_DATAH, - .setr = MCFGPIO_PPDSDR_DATAH, - .clrr = MCFGPIO_PCLRR_DATAH, + .pddr = (void __iomem *) MCFGPIO_PDDR_DATAH, + .podr = (void __iomem *) MCFGPIO_PODR_DATAH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAH, }, { .gpio_chip = { @@ -86,11 +86,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 24, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_DATAL, - .podr = MCFGPIO_PODR_DATAL, - .ppdr = MCFGPIO_PPDSDR_DATAL, - .setr = MCFGPIO_PPDSDR_DATAL, - .clrr = MCFGPIO_PCLRR_DATAL, + .pddr = (void __iomem *) MCFGPIO_PDDR_DATAL, + .podr = (void __iomem *) MCFGPIO_PODR_DATAL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAL, }, { .gpio_chip = { @@ -104,11 +104,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_BUSCTL, - .podr = MCFGPIO_PODR_BUSCTL, - .ppdr = MCFGPIO_PPDSDR_BUSCTL, - .setr = MCFGPIO_PPDSDR_BUSCTL, - .clrr = MCFGPIO_PCLRR_BUSCTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, + 
.podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { @@ -122,11 +122,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 40, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BS, - .podr = MCFGPIO_PODR_BS, - .ppdr = MCFGPIO_PPDSDR_BS, - .setr = MCFGPIO_PPDSDR_BS, - .clrr = MCFGPIO_PCLRR_BS, + .pddr = (void __iomem *) MCFGPIO_PDDR_BS, + .podr = (void __iomem *) MCFGPIO_PODR_BS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, }, { .gpio_chip = { @@ -140,11 +140,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 49, .ngpio = 7, }, - .pddr = MCFGPIO_PDDR_CS, - .podr = MCFGPIO_PODR_CS, - .ppdr = MCFGPIO_PPDSDR_CS, - .setr = MCFGPIO_PPDSDR_CS, - .clrr = MCFGPIO_PCLRR_CS, + .pddr = (void __iomem *) MCFGPIO_PDDR_CS, + .podr = (void __iomem *) MCFGPIO_PODR_CS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { @@ -158,11 +158,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 56, .ngpio = 6, }, - .pddr = MCFGPIO_PDDR_SDRAM, - .podr = MCFGPIO_PODR_SDRAM, - .ppdr = MCFGPIO_PPDSDR_SDRAM, - .setr = MCFGPIO_PPDSDR_SDRAM, - .clrr = MCFGPIO_PCLRR_SDRAM, + .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, + .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, }, { .gpio_chip = { @@ -176,11 +176,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_FECI2C, - .podr = MCFGPIO_PODR_FECI2C, - .ppdr = MCFGPIO_PPDSDR_FECI2C, - .setr = MCFGPIO_PPDSDR_FECI2C, - .clrr = MCFGPIO_PCLRR_FECI2C, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, + .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { @@ -194,11 +194,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 2, }, - .pddr = MCFGPIO_PDDR_UARTH, - .podr = MCFGPIO_PODR_UARTH, - .ppdr = MCFGPIO_PPDSDR_UARTH, - .setr = MCFGPIO_PPDSDR_UARTH, - .clrr = MCFGPIO_PCLRR_UARTH, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, + .podr = (void __iomem *) MCFGPIO_PODR_UARTH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, }, { .gpio_chip = { @@ -212,11 +212,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 80, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_UARTL, - .podr = MCFGPIO_PODR_UARTL, - .ppdr = MCFGPIO_PPDSDR_UARTL, - .setr = MCFGPIO_PPDSDR_UARTL, - .clrr = MCFGPIO_PCLRR_UARTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, + .podr = (void __iomem *) MCFGPIO_PODR_UARTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, }, { .gpio_chip = { @@ -230,11 +230,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 88, .ngpio = 5, }, - .pddr = MCFGPIO_PDDR_QSPI, - .podr = MCFGPIO_PODR_QSPI, - .ppdr = MCFGPIO_PPDSDR_QSPI, - .setr = MCFGPIO_PPDSDR_QSPI, - .clrr = MCFGPIO_PCLRR_QSPI, + .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, + .podr = (void 
__iomem *) MCFGPIO_PODR_QSPI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { @@ -248,11 +248,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 96, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_TIMER, - .podr = MCFGPIO_PODR_TIMER, - .ppdr = MCFGPIO_PPDSDR_TIMER, - .setr = MCFGPIO_PPDSDR_TIMER, - .clrr = MCFGPIO_PCLRR_TIMER, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, + .podr = (void __iomem *) MCFGPIO_PODR_TIMER, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, }, #elif defined(CONFIG_M5275) { @@ -267,9 +267,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 1, .ngpio = 7, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *) MCFEPORT_EPDDR, + .podr = (void __iomem *) MCFEPORT_EPDR, + .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -283,11 +283,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 8, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_BUSCTL, - .podr = MCFGPIO_PODR_BUSCTL, - .ppdr = MCFGPIO_PPDSDR_BUSCTL, - .setr = MCFGPIO_PPDSDR_BUSCTL, - .clrr = MCFGPIO_PCLRR_BUSCTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, + .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { @@ -301,11 +301,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 21, .ngpio = 3, }, - .pddr = MCFGPIO_PDDR_ADDR, - .podr = MCFGPIO_PODR_ADDR, - .ppdr = MCFGPIO_PPDSDR_ADDR, - .setr = MCFGPIO_PPDSDR_ADDR, - .clrr = MCFGPIO_PCLRR_ADDR, + .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, + .podr = (void __iomem *) MCFGPIO_PODR_ADDR, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, + .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, }, { .gpio_chip = { @@ -319,11 +319,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 25, .ngpio = 7, }, - .pddr = MCFGPIO_PDDR_CS, - .podr = MCFGPIO_PODR_CS, - .ppdr = MCFGPIO_PPDSDR_CS, - .setr = MCFGPIO_PPDSDR_CS, - .clrr = MCFGPIO_PCLRR_CS, + .pddr = (void __iomem *) MCFGPIO_PDDR_CS, + .podr = (void __iomem *) MCFGPIO_PODR_CS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { @@ -337,11 +337,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FEC0H, - .podr = MCFGPIO_PODR_FEC0H, - .ppdr = MCFGPIO_PPDSDR_FEC0H, - .setr = MCFGPIO_PPDSDR_FEC0H, - .clrr = MCFGPIO_PCLRR_FEC0H, + .pddr = (void __iomem *) MCFGPIO_PDDR_FEC0H, + .podr = (void __iomem *) MCFGPIO_PODR_FEC0H, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC0H, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC0H, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC0H, }, { .gpio_chip = { @@ -355,11 +355,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 40, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FEC0L, - .podr = MCFGPIO_PODR_FEC0L, - .ppdr = MCFGPIO_PPDSDR_FEC0L, - .setr = MCFGPIO_PPDSDR_FEC0L, - .clrr = MCFGPIO_PCLRR_FEC0L, + .pddr = (void __iomem *) MCFGPIO_PDDR_FEC0L, + .podr = (void __iomem *) MCFGPIO_PODR_FEC0L, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC0L, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC0L, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC0L, 
}, { .gpio_chip = { @@ -373,11 +373,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 48, .ngpio = 6, }, - .pddr = MCFGPIO_PDDR_FECI2C, - .podr = MCFGPIO_PODR_FECI2C, - .ppdr = MCFGPIO_PPDSDR_FECI2C, - .setr = MCFGPIO_PPDSDR_FECI2C, - .clrr = MCFGPIO_PCLRR_FECI2C, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, + .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { @@ -391,11 +391,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 56, .ngpio = 7, }, - .pddr = MCFGPIO_PDDR_QSPI, - .podr = MCFGPIO_PODR_QSPI, - .ppdr = MCFGPIO_PPDSDR_QSPI, - .setr = MCFGPIO_PPDSDR_QSPI, - .clrr = MCFGPIO_PCLRR_QSPI, + .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, + .podr = (void __iomem *) MCFGPIO_PODR_QSPI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { @@ -409,11 +409,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_SDRAM, - .podr = MCFGPIO_PODR_SDRAM, - .ppdr = MCFGPIO_PPDSDR_SDRAM, - .setr = MCFGPIO_PPDSDR_SDRAM, - .clrr = MCFGPIO_PCLRR_SDRAM, + .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, + .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, + .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, }, { .gpio_chip = { @@ -427,11 +427,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_TIMERH, - .podr = MCFGPIO_PODR_TIMERH, - .ppdr = MCFGPIO_PPDSDR_TIMERH, - .setr = MCFGPIO_PPDSDR_TIMERH, - .clrr = MCFGPIO_PCLRR_TIMERH, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMERH, + .podr = (void __iomem *) MCFGPIO_PODR_TIMERH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMERH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMERH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMERH, }, { .gpio_chip = { @@ -445,11 +445,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 80, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_TIMERL, - .podr = MCFGPIO_PODR_TIMERL, - .ppdr = MCFGPIO_PPDSDR_TIMERL, - .setr = MCFGPIO_PPDSDR_TIMERL, - .clrr = MCFGPIO_PCLRR_TIMERL, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMERL, + .podr = (void __iomem *) MCFGPIO_PODR_TIMERL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMERL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMERL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMERL, }, { .gpio_chip = { @@ -463,11 +463,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 88, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_UARTL, - .podr = MCFGPIO_PODR_UARTL, - .ppdr = MCFGPIO_PPDSDR_UARTL, - .setr = MCFGPIO_PPDSDR_UARTL, - .clrr = MCFGPIO_PCLRR_UARTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, + .podr = (void __iomem *) MCFGPIO_PODR_UARTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, }, { .gpio_chip = { @@ -481,11 +481,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 96, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FEC1H, - .podr = MCFGPIO_PODR_FEC1H, - .ppdr = MCFGPIO_PPDSDR_FEC1H, - .setr = MCFGPIO_PPDSDR_FEC1H, - .clrr = MCFGPIO_PCLRR_FEC1H, + .pddr = (void __iomem *) MCFGPIO_PDDR_FEC1H, + .podr = (void __iomem *) MCFGPIO_PODR_FEC1H, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC1H, + .setr = (void __iomem *) 
MCFGPIO_PPDSDR_FEC1H, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC1H, }, { .gpio_chip = { @@ -499,11 +499,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 104, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FEC1L, - .podr = MCFGPIO_PODR_FEC1L, - .ppdr = MCFGPIO_PPDSDR_FEC1L, - .setr = MCFGPIO_PPDSDR_FEC1L, - .clrr = MCFGPIO_PCLRR_FEC1L, + .pddr = (void __iomem *) MCFGPIO_PDDR_FEC1L, + .podr = (void __iomem *) MCFGPIO_PODR_FEC1L, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC1L, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC1L, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC1L, }, { .gpio_chip = { @@ -517,11 +517,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 114, .ngpio = 2, }, - .pddr = MCFGPIO_PDDR_BS, - .podr = MCFGPIO_PODR_BS, - .ppdr = MCFGPIO_PPDSDR_BS, - .setr = MCFGPIO_PPDSDR_BS, - .clrr = MCFGPIO_PCLRR_BS, + .pddr = (void __iomem *) MCFGPIO_PDDR_BS, + .podr = (void __iomem *) MCFGPIO_PODR_BS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, }, { .gpio_chip = { @@ -535,11 +535,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 121, .ngpio = 7, }, - .pddr = MCFGPIO_PDDR_IRQ, - .podr = MCFGPIO_PODR_IRQ, - .ppdr = MCFGPIO_PPDSDR_IRQ, - .setr = MCFGPIO_PPDSDR_IRQ, - .clrr = MCFGPIO_PCLRR_IRQ, + .pddr = (void __iomem *) MCFGPIO_PDDR_IRQ, + .podr = (void __iomem *) MCFGPIO_PODR_IRQ, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_IRQ, + .setr = (void __iomem *) MCFGPIO_PPDSDR_IRQ, + .clrr = (void __iomem *) MCFGPIO_PCLRR_IRQ, }, { .gpio_chip = { @@ -553,11 +553,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 128, .ngpio = 1, }, - .pddr = MCFGPIO_PDDR_USBH, - .podr = MCFGPIO_PODR_USBH, - .ppdr = MCFGPIO_PPDSDR_USBH, - .setr = MCFGPIO_PPDSDR_USBH, - .clrr = MCFGPIO_PCLRR_USBH, + .pddr = (void __iomem *) MCFGPIO_PDDR_USBH, + .podr = (void __iomem *) MCFGPIO_PODR_USBH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_USBH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_USBH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_USBH, }, { .gpio_chip = { @@ -571,11 +571,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 136, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_USBL, - .podr = MCFGPIO_PODR_USBL, - .ppdr = MCFGPIO_PPDSDR_USBL, - .setr = MCFGPIO_PPDSDR_USBL, - .clrr = MCFGPIO_PCLRR_USBL, + .pddr = (void __iomem *) MCFGPIO_PDDR_USBL, + .podr = (void __iomem *) MCFGPIO_PODR_USBL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_USBL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_USBL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_USBL, }, { .gpio_chip = { @@ -589,11 +589,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 144, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_UARTH, - .podr = MCFGPIO_PODR_UARTH, - .ppdr = MCFGPIO_PPDSDR_UARTH, - .setr = MCFGPIO_PPDSDR_UARTH, - .clrr = MCFGPIO_PCLRR_UARTH, + .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, + .podr = (void __iomem *) MCFGPIO_PODR_UARTH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, }, #endif }; diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c index 76b743343bfa..ac39fc661219 100644 --- a/arch/m68knommu/platform/528x/config.c +++ b/arch/m68knommu/platform/528x/config.c @@ -29,15 +29,15 @@ static struct mcf_platform_uart m528x_uart_platform[] = { { - .mapbase = MCF_MBAR + MCFUART_BASE1, + .mapbase = MCFUART_BASE1, .irq = MCFINT_VECBASE + MCFINT_UART0, }, { - .mapbase = MCF_MBAR + MCFUART_BASE2, + 
.mapbase = MCFUART_BASE2, .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, }, { - .mapbase = MCF_MBAR + MCFUART_BASE3, + .mapbase = MCFUART_BASE3, .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, }, { }, @@ -51,8 +51,8 @@ static struct platform_device m528x_uart = { static struct resource m528x_fec_resources[] = { { - .start = MCF_MBAR + 0x1000, - .end = MCF_MBAR + 0x1000 + 0x7ff, + .start = MCFFEC_BASE, + .end = MCFFEC_BASE + MCFFEC_SIZE - 1, .flags = IORESOURCE_MEM, }, { @@ -227,9 +227,9 @@ static void __init m528x_uart_init_line(int line, int irq) /* make sure PUAPAR is set for UART0 and UART1 */ if (line < 2) { - port = readb(MCF_MBAR + MCF5282_GPIO_PUAPAR); + port = readb(MCF5282_GPIO_PUAPAR); port |= (0x03 << (line * 2)); - writeb(port, MCF_MBAR + MCF5282_GPIO_PUAPAR); + writeb(port, MCF5282_GPIO_PUAPAR); } } diff --git a/arch/m68knommu/platform/528x/gpio.c b/arch/m68knommu/platform/528x/gpio.c index eedaf0adbcd7..526db665d87e 100644 --- a/arch/m68knommu/platform/528x/gpio.c +++ b/arch/m68knommu/platform/528x/gpio.c @@ -33,9 +33,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 1, .ngpio = 7, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *)MCFEPORT_EPDDR, + .podr = (void __iomem *)MCFEPORT_EPDR, + .ppdr = (void __iomem *)MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -49,9 +49,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 8, .ngpio = 4, }, - .pddr = MCFGPTA_GPTDDR, - .podr = MCFGPTA_GPTPORT, - .ppdr = MCFGPTB_GPTPORT, + .pddr = (void __iomem *)MCFGPTA_GPTDDR, + .podr = (void __iomem *)MCFGPTA_GPTPORT, + .ppdr = (void __iomem *)MCFGPTB_GPTPORT, }, { .gpio_chip = { @@ -65,9 +65,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 4, }, - .pddr = MCFGPTB_GPTDDR, - .podr = MCFGPTB_GPTPORT, - .ppdr = MCFGPTB_GPTPORT, + .pddr = (void __iomem *)MCFGPTB_GPTDDR, + .podr = (void __iomem *)MCFGPTB_GPTPORT, + .ppdr = (void __iomem *)MCFGPTB_GPTPORT, }, { .gpio_chip = { @@ -81,9 +81,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 24, .ngpio = 4, }, - .pddr = MCFQADC_DDRQA, - .podr = MCFQADC_PORTQA, - .ppdr = MCFQADC_PORTQA, + .pddr = (void __iomem *)MCFQADC_DDRQA, + .podr = (void __iomem *)MCFQADC_PORTQA, + .ppdr = (void __iomem *)MCFQADC_PORTQA, }, { .gpio_chip = { @@ -97,9 +97,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 4, }, - .pddr = MCFQADC_DDRQB, - .podr = MCFQADC_PORTQB, - .ppdr = MCFQADC_PORTQB, + .pddr = (void __iomem *)MCFQADC_DDRQB, + .podr = (void __iomem *)MCFQADC_PORTQB, + .ppdr = (void __iomem *)MCFQADC_PORTQB, }, { .gpio_chip = { @@ -113,11 +113,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 40, .ngpio = 8, }, - .pddr = MCFGPIO_DDRA, - .podr = MCFGPIO_PORTA, - .ppdr = MCFGPIO_PORTAP, - .setr = MCFGPIO_SETA, - .clrr = MCFGPIO_CLRA, + .pddr = (void __iomem *)MCFGPIO_DDRA, + .podr = (void __iomem *)MCFGPIO_PORTA, + .ppdr = (void __iomem *)MCFGPIO_PORTAP, + .setr = (void __iomem *)MCFGPIO_SETA, + .clrr = (void __iomem *)MCFGPIO_CLRA, }, { .gpio_chip = { @@ -131,11 +131,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 48, .ngpio = 8, }, - .pddr = MCFGPIO_DDRB, - .podr = MCFGPIO_PORTB, - .ppdr = MCFGPIO_PORTBP, - .setr = MCFGPIO_SETB, - .clrr = MCFGPIO_CLRB, + .pddr = (void __iomem *)MCFGPIO_DDRB, + .podr = (void __iomem *)MCFGPIO_PORTB, + .ppdr = (void __iomem *)MCFGPIO_PORTBP, + .setr = (void __iomem *)MCFGPIO_SETB, + .clrr = (void __iomem *)MCFGPIO_CLRB, }, { .gpio_chip = { @@ -149,11 +149,11 @@ static struct mcf_gpio_chip 
mcf_gpio_chips[] = { .base = 56, .ngpio = 8, }, - .pddr = MCFGPIO_DDRC, - .podr = MCFGPIO_PORTC, - .ppdr = MCFGPIO_PORTCP, - .setr = MCFGPIO_SETC, - .clrr = MCFGPIO_CLRC, + .pddr = (void __iomem *)MCFGPIO_DDRC, + .podr = (void __iomem *)MCFGPIO_PORTC, + .ppdr = (void __iomem *)MCFGPIO_PORTCP, + .setr = (void __iomem *)MCFGPIO_SETC, + .clrr = (void __iomem *)MCFGPIO_CLRC, }, { .gpio_chip = { @@ -167,11 +167,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 8, }, - .pddr = MCFGPIO_DDRD, - .podr = MCFGPIO_PORTD, - .ppdr = MCFGPIO_PORTDP, - .setr = MCFGPIO_SETD, - .clrr = MCFGPIO_CLRD, + .pddr = (void __iomem *)MCFGPIO_DDRD, + .podr = (void __iomem *)MCFGPIO_PORTD, + .ppdr = (void __iomem *)MCFGPIO_PORTDP, + .setr = (void __iomem *)MCFGPIO_SETD, + .clrr = (void __iomem *)MCFGPIO_CLRD, }, { .gpio_chip = { @@ -185,11 +185,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 8, }, - .pddr = MCFGPIO_DDRE, - .podr = MCFGPIO_PORTE, - .ppdr = MCFGPIO_PORTEP, - .setr = MCFGPIO_SETE, - .clrr = MCFGPIO_CLRE, + .pddr = (void __iomem *)MCFGPIO_DDRE, + .podr = (void __iomem *)MCFGPIO_PORTE, + .ppdr = (void __iomem *)MCFGPIO_PORTEP, + .setr = (void __iomem *)MCFGPIO_SETE, + .clrr = (void __iomem *)MCFGPIO_CLRE, }, { .gpio_chip = { @@ -203,11 +203,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 80, .ngpio = 8, }, - .pddr = MCFGPIO_DDRF, - .podr = MCFGPIO_PORTF, - .ppdr = MCFGPIO_PORTFP, - .setr = MCFGPIO_SETF, - .clrr = MCFGPIO_CLRF, + .pddr = (void __iomem *)MCFGPIO_DDRF, + .podr = (void __iomem *)MCFGPIO_PORTF, + .ppdr = (void __iomem *)MCFGPIO_PORTFP, + .setr = (void __iomem *)MCFGPIO_SETF, + .clrr = (void __iomem *)MCFGPIO_CLRF, }, { .gpio_chip = { @@ -221,11 +221,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 88, .ngpio = 8, }, - .pddr = MCFGPIO_DDRG, - .podr = MCFGPIO_PORTG, - .ppdr = MCFGPIO_PORTGP, - .setr = MCFGPIO_SETG, - .clrr = MCFGPIO_CLRG, + .pddr = (void __iomem *)MCFGPIO_DDRG, + .podr = (void __iomem *)MCFGPIO_PORTG, + .ppdr = (void __iomem *)MCFGPIO_PORTGP, + .setr = (void __iomem *)MCFGPIO_SETG, + .clrr = (void __iomem *)MCFGPIO_CLRG, }, { .gpio_chip = { @@ -239,11 +239,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 96, .ngpio = 8, }, - .pddr = MCFGPIO_DDRH, - .podr = MCFGPIO_PORTH, - .ppdr = MCFGPIO_PORTHP, - .setr = MCFGPIO_SETH, - .clrr = MCFGPIO_CLRH, + .pddr = (void __iomem *)MCFGPIO_DDRH, + .podr = (void __iomem *)MCFGPIO_PORTH, + .ppdr = (void __iomem *)MCFGPIO_PORTHP, + .setr = (void __iomem *)MCFGPIO_SETH, + .clrr = (void __iomem *)MCFGPIO_CLRH, }, { .gpio_chip = { @@ -257,11 +257,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 104, .ngpio = 8, }, - .pddr = MCFGPIO_DDRJ, - .podr = MCFGPIO_PORTJ, - .ppdr = MCFGPIO_PORTJP, - .setr = MCFGPIO_SETJ, - .clrr = MCFGPIO_CLRJ, + .pddr = (void __iomem *)MCFGPIO_DDRJ, + .podr = (void __iomem *)MCFGPIO_PORTJ, + .ppdr = (void __iomem *)MCFGPIO_PORTJP, + .setr = (void __iomem *)MCFGPIO_SETJ, + .clrr = (void __iomem *)MCFGPIO_CLRJ, }, { .gpio_chip = { @@ -275,11 +275,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 112, .ngpio = 8, }, - .pddr = MCFGPIO_DDRDD, - .podr = MCFGPIO_PORTDD, - .ppdr = MCFGPIO_PORTDDP, - .setr = MCFGPIO_SETDD, - .clrr = MCFGPIO_CLRDD, + .pddr = (void __iomem *)MCFGPIO_DDRDD, + .podr = (void __iomem *)MCFGPIO_PORTDD, + .ppdr = (void __iomem *)MCFGPIO_PORTDDP, + .setr = (void __iomem *)MCFGPIO_SETDD, + .clrr = (void __iomem *)MCFGPIO_CLRDD, }, { .gpio_chip = { @@ -293,11 +293,11 @@ static struct 
mcf_gpio_chip mcf_gpio_chips[] = { .base = 120, .ngpio = 8, }, - .pddr = MCFGPIO_DDREH, - .podr = MCFGPIO_PORTEH, - .ppdr = MCFGPIO_PORTEHP, - .setr = MCFGPIO_SETEH, - .clrr = MCFGPIO_CLREH, + .pddr = (void __iomem *)MCFGPIO_DDREH, + .podr = (void __iomem *)MCFGPIO_PORTEH, + .ppdr = (void __iomem *)MCFGPIO_PORTEHP, + .setr = (void __iomem *)MCFGPIO_SETEH, + .clrr = (void __iomem *)MCFGPIO_CLREH, }, { .gpio_chip = { @@ -311,11 +311,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 128, .ngpio = 8, }, - .pddr = MCFGPIO_DDREL, - .podr = MCFGPIO_PORTEL, - .ppdr = MCFGPIO_PORTELP, - .setr = MCFGPIO_SETEL, - .clrr = MCFGPIO_CLREL, + .pddr = (void __iomem *)MCFGPIO_DDREL, + .podr = (void __iomem *)MCFGPIO_PORTEL, + .ppdr = (void __iomem *)MCFGPIO_PORTELP, + .setr = (void __iomem *)MCFGPIO_SETEL, + .clrr = (void __iomem *)MCFGPIO_CLREL, }, { .gpio_chip = { @@ -329,11 +329,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 136, .ngpio = 6, }, - .pddr = MCFGPIO_DDRAS, - .podr = MCFGPIO_PORTAS, - .ppdr = MCFGPIO_PORTASP, - .setr = MCFGPIO_SETAS, - .clrr = MCFGPIO_CLRAS, + .pddr = (void __iomem *)MCFGPIO_DDRAS, + .podr = (void __iomem *)MCFGPIO_PORTAS, + .ppdr = (void __iomem *)MCFGPIO_PORTASP, + .setr = (void __iomem *)MCFGPIO_SETAS, + .clrr = (void __iomem *)MCFGPIO_CLRAS, }, { .gpio_chip = { @@ -347,11 +347,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 144, .ngpio = 7, }, - .pddr = MCFGPIO_DDRQS, - .podr = MCFGPIO_PORTQS, - .ppdr = MCFGPIO_PORTQSP, - .setr = MCFGPIO_SETQS, - .clrr = MCFGPIO_CLRQS, + .pddr = (void __iomem *)MCFGPIO_DDRQS, + .podr = (void __iomem *)MCFGPIO_PORTQS, + .ppdr = (void __iomem *)MCFGPIO_PORTQSP, + .setr = (void __iomem *)MCFGPIO_SETQS, + .clrr = (void __iomem *)MCFGPIO_CLRQS, }, { .gpio_chip = { @@ -365,11 +365,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 152, .ngpio = 6, }, - .pddr = MCFGPIO_DDRSD, - .podr = MCFGPIO_PORTSD, - .ppdr = MCFGPIO_PORTSDP, - .setr = MCFGPIO_SETSD, - .clrr = MCFGPIO_CLRSD, + .pddr = (void __iomem *)MCFGPIO_DDRSD, + .podr = (void __iomem *)MCFGPIO_PORTSD, + .ppdr = (void __iomem *)MCFGPIO_PORTSDP, + .setr = (void __iomem *)MCFGPIO_SETSD, + .clrr = (void __iomem *)MCFGPIO_CLRSD, }, { .gpio_chip = { @@ -383,11 +383,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 160, .ngpio = 4, }, - .pddr = MCFGPIO_DDRTC, - .podr = MCFGPIO_PORTTC, - .ppdr = MCFGPIO_PORTTCP, - .setr = MCFGPIO_SETTC, - .clrr = MCFGPIO_CLRTC, + .pddr = (void __iomem *)MCFGPIO_DDRTC, + .podr = (void __iomem *)MCFGPIO_PORTTC, + .ppdr = (void __iomem *)MCFGPIO_PORTTCP, + .setr = (void __iomem *)MCFGPIO_SETTC, + .clrr = (void __iomem *)MCFGPIO_CLRTC, }, { .gpio_chip = { @@ -401,11 +401,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 168, .ngpio = 4, }, - .pddr = MCFGPIO_DDRTD, - .podr = MCFGPIO_PORTTD, - .ppdr = MCFGPIO_PORTTDP, - .setr = MCFGPIO_SETTD, - .clrr = MCFGPIO_CLRTD, + .pddr = (void __iomem *)MCFGPIO_DDRTD, + .podr = (void __iomem *)MCFGPIO_PORTTD, + .ppdr = (void __iomem *)MCFGPIO_PORTTDP, + .setr = (void __iomem *)MCFGPIO_SETTD, + .clrr = (void __iomem *)MCFGPIO_CLRTD, }, { .gpio_chip = { @@ -419,11 +419,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 176, .ngpio = 4, }, - .pddr = MCFGPIO_DDRUA, - .podr = MCFGPIO_PORTUA, - .ppdr = MCFGPIO_PORTUAP, - .setr = MCFGPIO_SETUA, - .clrr = MCFGPIO_CLRUA, + .pddr = (void __iomem *)MCFGPIO_DDRUA, + .podr = (void __iomem *)MCFGPIO_PORTUA, + .ppdr = (void __iomem *)MCFGPIO_PORTUAP, + .setr = (void __iomem *)MCFGPIO_SETUA, + .clrr = 
(void __iomem *)MCFGPIO_CLRUA, }, }; diff --git a/arch/m68knommu/platform/5307/gpio.c b/arch/m68knommu/platform/5307/gpio.c index 8da5880e4066..5850612b4a38 100644 --- a/arch/m68knommu/platform/5307/gpio.c +++ b/arch/m68knommu/platform/5307/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 16, }, - .pddr = MCFSIM_PADDR, - .podr = MCFSIM_PADAT, - .ppdr = MCFSIM_PADAT, + .pddr = (void __iomem *) MCFSIM_PADDR, + .podr = (void __iomem *) MCFSIM_PADAT, + .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; diff --git a/arch/m68knommu/platform/532x/gpio.c b/arch/m68knommu/platform/532x/gpio.c index 184b77382c3d..212a85deac90 100644 --- a/arch/m68knommu/platform/532x/gpio.c +++ b/arch/m68knommu/platform/532x/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 8, }, - .pddr = MCFEPORT_EPDDR, - .podr = MCFEPORT_EPDR, - .ppdr = MCFEPORT_EPPDR, + .pddr = (void __iomem *) MCFEPORT_EPDDR, + .podr = (void __iomem *) MCFEPORT_EPDR, + .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { @@ -48,11 +48,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 8, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FECH, - .podr = MCFGPIO_PODR_FECH, - .ppdr = MCFGPIO_PPDSDR_FECH, - .setr = MCFGPIO_PPDSDR_FECH, - .clrr = MCFGPIO_PCLRR_FECH, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECH, + .podr = (void __iomem *) MCFGPIO_PODR_FECH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECH, }, { .gpio_chip = { @@ -66,11 +66,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 16, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_FECL, - .podr = MCFGPIO_PODR_FECL, - .ppdr = MCFGPIO_PPDSDR_FECL, - .setr = MCFGPIO_PPDSDR_FECL, - .clrr = MCFGPIO_PCLRR_FECL, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECL, + .podr = (void __iomem *) MCFGPIO_PODR_FECL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECL, }, { .gpio_chip = { @@ -84,11 +84,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 24, .ngpio = 5, }, - .pddr = MCFGPIO_PDDR_SSI, - .podr = MCFGPIO_PODR_SSI, - .ppdr = MCFGPIO_PPDSDR_SSI, - .setr = MCFGPIO_PPDSDR_SSI, - .clrr = MCFGPIO_PCLRR_SSI, + .pddr = (void __iomem *) MCFGPIO_PDDR_SSI, + .podr = (void __iomem *) MCFGPIO_PODR_SSI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SSI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_SSI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_SSI, }, { .gpio_chip = { @@ -102,11 +102,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 32, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BUSCTL, - .podr = MCFGPIO_PODR_BUSCTL, - .ppdr = MCFGPIO_PPDSDR_BUSCTL, - .setr = MCFGPIO_PPDSDR_BUSCTL, - .clrr = MCFGPIO_PCLRR_BUSCTL, + .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, + .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { @@ -120,11 +120,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 40, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_BE, - .podr = MCFGPIO_PODR_BE, - .ppdr = MCFGPIO_PPDSDR_BE, - .setr = MCFGPIO_PPDSDR_BE, - .clrr = MCFGPIO_PCLRR_BE, + .pddr = (void __iomem *) MCFGPIO_PDDR_BE, + .podr = (void __iomem *) MCFGPIO_PODR_BE, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE, + .setr = (void __iomem *) MCFGPIO_PPDSDR_BE, + .clrr = (void 
__iomem *) MCFGPIO_PCLRR_BE, }, { .gpio_chip = { @@ -138,11 +138,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 49, .ngpio = 5, }, - .pddr = MCFGPIO_PDDR_CS, - .podr = MCFGPIO_PODR_CS, - .ppdr = MCFGPIO_PPDSDR_CS, - .setr = MCFGPIO_PPDSDR_CS, - .clrr = MCFGPIO_PCLRR_CS, + .pddr = (void __iomem *) MCFGPIO_PDDR_CS, + .podr = (void __iomem *) MCFGPIO_PODR_CS, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, + .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { @@ -156,11 +156,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 58, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_PWM, - .podr = MCFGPIO_PODR_PWM, - .ppdr = MCFGPIO_PPDSDR_PWM, - .setr = MCFGPIO_PPDSDR_PWM, - .clrr = MCFGPIO_PCLRR_PWM, + .pddr = (void __iomem *) MCFGPIO_PDDR_PWM, + .podr = (void __iomem *) MCFGPIO_PODR_PWM, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_PWM, + .setr = (void __iomem *) MCFGPIO_PPDSDR_PWM, + .clrr = (void __iomem *) MCFGPIO_PCLRR_PWM, }, { .gpio_chip = { @@ -174,11 +174,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 64, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_FECI2C, - .podr = MCFGPIO_PODR_FECI2C, - .ppdr = MCFGPIO_PPDSDR_FECI2C, - .setr = MCFGPIO_PPDSDR_FECI2C, - .clrr = MCFGPIO_PCLRR_FECI2C, + .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, + .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, + .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { @@ -192,11 +192,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 72, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_UART, - .podr = MCFGPIO_PODR_UART, - .ppdr = MCFGPIO_PPDSDR_UART, - .setr = MCFGPIO_PPDSDR_UART, - .clrr = MCFGPIO_PCLRR_UART, + .pddr = (void __iomem *) MCFGPIO_PDDR_UART, + .podr = (void __iomem *) MCFGPIO_PODR_UART, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART, + .setr = (void __iomem *) MCFGPIO_PPDSDR_UART, + .clrr = (void __iomem *) MCFGPIO_PCLRR_UART, }, { .gpio_chip = { @@ -210,11 +210,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 80, .ngpio = 6, }, - .pddr = MCFGPIO_PDDR_QSPI, - .podr = MCFGPIO_PODR_QSPI, - .ppdr = MCFGPIO_PPDSDR_QSPI, - .setr = MCFGPIO_PPDSDR_QSPI, - .clrr = MCFGPIO_PCLRR_QSPI, + .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, + .podr = (void __iomem *) MCFGPIO_PODR_QSPI, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, + .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { @@ -228,11 +228,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 88, .ngpio = 4, }, - .pddr = MCFGPIO_PDDR_TIMER, - .podr = MCFGPIO_PODR_TIMER, - .ppdr = MCFGPIO_PPDSDR_TIMER, - .setr = MCFGPIO_PPDSDR_TIMER, - .clrr = MCFGPIO_PCLRR_TIMER, + .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, + .podr = (void __iomem *) MCFGPIO_PODR_TIMER, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, + .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, }, { .gpio_chip = { @@ -246,11 +246,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 96, .ngpio = 2, }, - .pddr = MCFGPIO_PDDR_LCDDATAH, - .podr = MCFGPIO_PODR_LCDDATAH, - .ppdr = MCFGPIO_PPDSDR_LCDDATAH, - .setr = MCFGPIO_PPDSDR_LCDDATAH, - .clrr = MCFGPIO_PCLRR_LCDDATAH, + .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAH, + .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH, + .clrr = (void 
__iomem *) MCFGPIO_PCLRR_LCDDATAH, }, { .gpio_chip = { @@ -264,11 +264,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 104, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_LCDDATAM, - .podr = MCFGPIO_PODR_LCDDATAM, - .ppdr = MCFGPIO_PPDSDR_LCDDATAM, - .setr = MCFGPIO_PPDSDR_LCDDATAM, - .clrr = MCFGPIO_PCLRR_LCDDATAM, + .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAM, + .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAM, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM, + .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM, + .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAM, }, { .gpio_chip = { @@ -282,11 +282,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 112, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_LCDDATAL, - .podr = MCFGPIO_PODR_LCDDATAL, - .ppdr = MCFGPIO_PPDSDR_LCDDATAL, - .setr = MCFGPIO_PPDSDR_LCDDATAL, - .clrr = MCFGPIO_PCLRR_LCDDATAL, + .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAL, + .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAL, }, { .gpio_chip = { @@ -300,11 +300,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 120, .ngpio = 1, }, - .pddr = MCFGPIO_PDDR_LCDCTLH, - .podr = MCFGPIO_PODR_LCDCTLH, - .ppdr = MCFGPIO_PPDSDR_LCDCTLH, - .setr = MCFGPIO_PPDSDR_LCDCTLH, - .clrr = MCFGPIO_PCLRR_LCDCTLH, + .pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLH, + .podr = (void __iomem *) MCFGPIO_PODR_LCDCTLH, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH, + .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH, + .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLH, }, { .gpio_chip = { @@ -318,11 +318,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .base = 128, .ngpio = 8, }, - .pddr = MCFGPIO_PDDR_LCDCTLL, - .podr = MCFGPIO_PODR_LCDCTLL, - .ppdr = MCFGPIO_PPDSDR_LCDCTLL, - .setr = MCFGPIO_PPDSDR_LCDCTLL, - .clrr = MCFGPIO_PCLRR_LCDCTLL, + .pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLL, + .podr = (void __iomem *) MCFGPIO_PODR_LCDCTLL, + .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL, + .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL, + .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLL, }, }; diff --git a/arch/m68knommu/platform/5407/gpio.c b/arch/m68knommu/platform/5407/gpio.c index 8da5880e4066..5850612b4a38 100644 --- a/arch/m68knommu/platform/5407/gpio.c +++ b/arch/m68knommu/platform/5407/gpio.c @@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { .set = mcf_gpio_set_value, .ngpio = 16, }, - .pddr = MCFSIM_PADDR, - .podr = MCFSIM_PADAT, - .ppdr = MCFSIM_PADAT, + .pddr = (void __iomem *) MCFSIM_PADDR, + .podr = (void __iomem *) MCFSIM_PADAT, + .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; diff --git a/arch/m68knommu/platform/54xx/Makefile b/arch/m68knommu/platform/54xx/Makefile index e6035e7a2d3f..6cfd090ec3cd 100644 --- a/arch/m68knommu/platform/54xx/Makefile +++ b/arch/m68knommu/platform/54xx/Makefile @@ -15,4 +15,5 @@ asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 obj-y := config.o +obj-$(CONFIG_FIREBEE) += firebee.o diff --git a/arch/m68knommu/platform/54xx/firebee.c b/arch/m68knommu/platform/54xx/firebee.c new file mode 100644 index 000000000000..46d50534f981 --- /dev/null +++ b/arch/m68knommu/platform/54xx/firebee.c @@ -0,0 +1,86 @@ +/***************************************************************************/ + +/* + * firebee.c -- extra startup code support for the FireBee boards + * + * Copyright (C) 2011, Greg Ungerer (gerg@snapgear.com) + */ + 
+/***************************************************************************/ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/physmap.h> +#include <asm/coldfire.h> +#include <asm/mcfsim.h> + +/***************************************************************************/ + +/* + * 8MB of NOR flash fitted to the FireBee board. + */ +#define FLASH_PHYS_ADDR 0xe0000000 /* Physical address of flash */ +#define FLASH_PHYS_SIZE 0x00800000 /* Size of flash */ + +#define PART_BOOT_START 0x00000000 /* Start at bottom of flash */ +#define PART_BOOT_SIZE 0x00040000 /* 256k in size */ +#define PART_IMAGE_START 0x00040000 /* Start after boot loader */ +#define PART_IMAGE_SIZE 0x006c0000 /* Most of flash */ +#define PART_FPGA_START 0x00700000 /* Start at offset 7MB */ +#define PART_FPGA_SIZE 0x00100000 /* 1MB in size */ + +static struct mtd_partition firebee_flash_parts[] = { + { + .name = "dBUG", + .offset = PART_BOOT_START, + .size = PART_BOOT_SIZE, + }, + { + .name = "FPGA", + .offset = PART_FPGA_START, + .size = PART_FPGA_SIZE, + }, + { + .name = "image", + .offset = PART_IMAGE_START, + .size = PART_IMAGE_SIZE, + }, +}; + +static struct physmap_flash_data firebee_flash_data = { + .width = 2, + .nr_parts = ARRAY_SIZE(firebee_flash_parts), + .parts = firebee_flash_parts, +}; + +static struct resource firebee_flash_resource = { + .start = FLASH_PHYS_ADDR, + .end = FLASH_PHYS_ADDR + FLASH_PHYS_SIZE, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device firebee_flash = { + .name = "physmap-flash", + .id = 0, + .dev = { + .platform_data = &firebee_flash_data, + }, + .num_resources = 1, + .resource = &firebee_flash_resource, +}; + +/***************************************************************************/ + +static int __init init_firebee(void) +{ + platform_device_register(&firebee_flash); + return 0; +} + +arch_initcall(init_firebee); + +/***************************************************************************/ diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c index 2a3af193ccd3..e5631831a200 100644 --- a/arch/m68knommu/platform/68328/ints.c +++ b/arch/m68knommu/platform/68328/ints.c @@ -135,20 +135,20 @@ void process_int(int vec, struct pt_regs *fp) } } -static void intc_irq_unmask(unsigned int irq) +static void intc_irq_unmask(struct irq_data *d) { - IMR &= ~(1<<irq); + IMR &= ~(1 << d->irq); } -static void intc_irq_mask(unsigned int irq) +static void intc_irq_mask(struct irq_data *d) { - IMR |= (1<<irq); + IMR |= (1 << d->irq); } static struct irq_chip intc_irq_chip = { .name = "M68K-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, }; /* diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c index a29041c1a8a0..8de3feb568c6 100644 --- a/arch/m68knommu/platform/68360/ints.c +++ b/arch/m68knommu/platform/68360/ints.c @@ -37,26 +37,26 @@ extern void *_ramvec[]; /* The number of spurious interrupts */ volatile unsigned int num_spurious; -static void intc_irq_unmask(unsigned int irq) +static void intc_irq_unmask(struct irq_data *d) { - pquicc->intr_cimr |= (1 << irq); + pquicc->intr_cimr |= (1 << d->irq); } -static void intc_irq_mask(unsigned int irq) +static void intc_irq_mask(struct irq_data *d) { - pquicc->intr_cimr &= ~(1 << irq); + pquicc->intr_cimr &= ~(1 << d->irq); } -static void 
intc_irq_ack(unsigned int irq) +static void intc_irq_ack(struct irq_data *d) { - pquicc->intr_cisr = (1 << irq); + pquicc->intr_cisr = (1 << d->irq); } static struct irq_chip intc_irq_chip = { .name = "M68K-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, - .ack = intc_irq_ack, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, + .irq_ack = intc_irq_ack, }; /* diff --git a/arch/m68knommu/platform/coldfire/dma.c b/arch/m68knommu/platform/coldfire/dma.c index 2b30cf1b8f77..e88b95e2cc62 100644 --- a/arch/m68knommu/platform/coldfire/dma.c +++ b/arch/m68knommu/platform/coldfire/dma.c @@ -21,16 +21,16 @@ */ unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = { #ifdef MCFDMA_BASE0 - MCF_MBAR + MCFDMA_BASE0, + MCFDMA_BASE0, #endif #ifdef MCFDMA_BASE1 - MCF_MBAR + MCFDMA_BASE1, + MCFDMA_BASE1, #endif #ifdef MCFDMA_BASE2 - MCF_MBAR + MCFDMA_BASE2, + MCFDMA_BASE2, #endif #ifdef MCFDMA_BASE3 - MCF_MBAR + MCFDMA_BASE3, + MCFDMA_BASE3, #endif }; diff --git a/arch/m68knommu/platform/coldfire/head.S b/arch/m68knommu/platform/coldfire/head.S index d5977909ae5f..129bff4956b5 100644 --- a/arch/m68knommu/platform/coldfire/head.S +++ b/arch/m68knommu/platform/coldfire/head.S @@ -41,17 +41,17 @@ * DRAM controller is quite different. */ .macro GET_MEM_SIZE - movel MCF_MBAR+MCFSIM_DMR0,%d0 /* get mask for 1st bank */ + movel MCFSIM_DMR0,%d0 /* get mask for 1st bank */ btst #0,%d0 /* check if region enabled */ beq 1f andl #0xfffc0000,%d0 beq 1f addl #0x00040000,%d0 /* convert mask to size */ 1: - movel MCF_MBAR+MCFSIM_DMR1,%d1 /* get mask for 2nd bank */ + movel MCFSIM_DMR1,%d1 /* get mask for 2nd bank */ btst #0,%d1 /* check if region enabled */ beq 2f - andl #0xfffc0000, %d1 + andl #0xfffc0000,%d1 beq 2f addl #0x00040000,%d1 addl %d1,%d0 /* total mem size in d0 */ @@ -68,14 +68,14 @@ #elif defined(CONFIG_M520x) .macro GET_MEM_SIZE clrl %d0 - movel MCF_MBAR+MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */ + movel MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */ andl #0x1f, %d2 /* Get only the chip select size */ beq 3f /* Check if it is enabled */ addql #1, %d2 /* Form exponent */ moveql #1, %d0 lsll %d2, %d0 /* 2 ^ exponent */ 3: - movel MCF_MBAR+MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */ + movel MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */ andl #0x1f, %d2 /* Get only the chip select size */ beq 4f /* Check if it is enabled */ addql #1, %d2 /* Form exponent */ diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c index 85daa2b3001a..2cbfbf035db9 100644 --- a/arch/m68knommu/platform/coldfire/intc-2.c +++ b/arch/m68knommu/platform/coldfire/intc-2.c @@ -7,7 +7,10 @@ * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such * controllers, and the 547x and 548x families which have only one of them. * - * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> + * The external 7 fixed interrupts are part the the Edge Port unit of these + * ColdFire parts. They can be configured as level or edge triggered. + * + * (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive @@ -31,11 +34,12 @@ #define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */ /* - * Each vector needs a unique priority and level associated with it. - * We don't really care so much what they are, we don't rely on the - * traditional priority interrupt scheme of the m68k/ColdFire. 
+ * The EDGE Port interrupts are the fixed 7 external interrupts. + * They need some special treatment, for example they need to be acked. */ -static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); +#define EINT0 64 /* Is not actually used, but spot reserved for it */ +#define EINT1 65 /* EDGE Port interrupt 1 */ +#define EINT7 71 /* EDGE Port interrupt 7 */ #ifdef MCFICM_INTC1 #define NR_VECS 128 @@ -43,66 +47,147 @@ static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); #define NR_VECS 64 #endif -static void intc_irq_mask(unsigned int irq) +static void intc_irq_mask(struct irq_data *d) { - if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) { - unsigned long imraddr; - u32 val, imrbit; + unsigned int irq = d->irq - MCFINT_VECBASE; + unsigned long imraddr; + u32 val, imrbit; - irq -= MCFINT_VECBASE; - imraddr = MCF_IPSBAR; #ifdef MCFICM_INTC1 - imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; + imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; #else - imraddr += MCFICM_INTC0; + imraddr = MCFICM_INTC0; #endif - imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; - imrbit = 0x1 << (irq & 0x1f); + imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; + imrbit = 0x1 << (irq & 0x1f); - val = __raw_readl(imraddr); - __raw_writel(val | imrbit, imraddr); - } + val = __raw_readl(imraddr); + __raw_writel(val | imrbit, imraddr); +} + +static void intc_irq_unmask(struct irq_data *d) +{ + unsigned int irq = d->irq - MCFINT_VECBASE; + unsigned long imraddr; + u32 val, imrbit; + +#ifdef MCFICM_INTC1 + imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; +#else + imraddr = MCFICM_INTC0; +#endif + imraddr += ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); + imrbit = 0x1 << (irq & 0x1f); + + /* Don't set the "maskall" bit! */ + if ((irq & 0x20) == 0) + imrbit |= 0x1; + + val = __raw_readl(imraddr); + __raw_writel(val & ~imrbit, imraddr); +} + +/* + * Only the external (or EDGE Port) interrupts need to be acknowledged + * here, as part of the IRQ handler. They only really need to be ack'ed + * if they are in edge triggered mode, but there is no harm in doing it + * for all types. + */ +static void intc_irq_ack(struct irq_data *d) +{ + unsigned int irq = d->irq; + + __raw_writeb(0x1 << (irq - EINT0), MCFEPORT_EPFR); } -static void intc_irq_unmask(unsigned int irq) +/* + * Each vector needs a unique priority and level associated with it. + * We don't really care so much what they are, we don't rely on the + * traditional priority interrupt scheme of the m68k/ColdFire. This + * only needs to be set once for an interrupt, and we will never change + * these values once we have set them. + */ +static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); + +static unsigned int intc_irq_startup(struct irq_data *d) { - if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) { - unsigned long intaddr, imraddr, icraddr; - u32 val, imrbit; + unsigned int irq = d->irq - MCFINT_VECBASE; + unsigned long icraddr; - irq -= MCFINT_VECBASE; - intaddr = MCF_IPSBAR; #ifdef MCFICM_INTC1 - intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; + icraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; #else - intaddr += MCFICM_INTC0; + icraddr = MCFICM_INTC0; #endif - imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); - icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f); - imrbit = 0x1 << (irq & 0x1f); + icraddr += MCFINTC_ICR0 + (irq & 0x3f); + if (__raw_readb(icraddr) == 0) + __raw_writeb(intc_intpri--, icraddr); - /* Don't set the "maskall" bit! 
*/ - if ((irq & 0x20) == 0) - imrbit |= 0x1; + irq = d->irq; + if ((irq >= EINT1) && (irq <= EINT7)) { + u8 v; - if (__raw_readb(icraddr) == 0) - __raw_writeb(intc_intpri--, icraddr); + irq -= EINT0; - val = __raw_readl(imraddr); - __raw_writel(val & ~imrbit, imraddr); + /* Set EPORT line as input */ + v = __raw_readb(MCFEPORT_EPDDR); + __raw_writeb(v & ~(0x1 << irq), MCFEPORT_EPDDR); + + /* Set EPORT line as interrupt source */ + v = __raw_readb(MCFEPORT_EPIER); + __raw_writeb(v | (0x1 << irq), MCFEPORT_EPIER); } + + intc_irq_unmask(d); + return 0; } -static int intc_irq_set_type(unsigned int irq, unsigned int type) +static int intc_irq_set_type(struct irq_data *d, unsigned int type) { + unsigned int irq = d->irq; + u16 pa, tb; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + tb = 0x1; + break; + case IRQ_TYPE_EDGE_FALLING: + tb = 0x2; + break; + case IRQ_TYPE_EDGE_BOTH: + tb = 0x3; + break; + default: + /* Level triggered */ + tb = 0; + break; + } + + if (tb) + set_irq_handler(irq, handle_edge_irq); + + irq -= EINT0; + pa = __raw_readw(MCFEPORT_EPPAR); + pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); + __raw_writew(pa, MCFEPORT_EPPAR); + return 0; } static struct irq_chip intc_irq_chip = { .name = "CF-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, - .set_type = intc_irq_set_type, + .irq_startup = intc_irq_startup, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, +}; + +static struct irq_chip intc_irq_chip_edge_port = { + .name = "CF-INTC-EP", + .irq_startup = intc_irq_startup, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, + .irq_ack = intc_irq_ack, + .irq_set_type = intc_irq_set_type, }; void __init init_IRQ(void) @@ -112,13 +197,16 @@ void __init init_IRQ(void) init_vectors(); /* Mask all interrupt sources */ - __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); + __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL); #ifdef MCFICM_INTC1 - __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL); + __raw_writel(0x1, MCFICM_INTC1 + MCFINTC_IMRL); #endif - for (irq = 0; (irq < NR_IRQS); irq++) { - set_irq_chip(irq, &intc_irq_chip); + for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { + if ((irq >= EINT1) && (irq <=EINT7)) + set_irq_chip(irq, &intc_irq_chip_edge_port); + else + set_irq_chip(irq, &intc_irq_chip); set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); set_irq_handler(irq, handle_level_irq); } diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c index bb7048636140..e642b24ab729 100644 --- a/arch/m68knommu/platform/coldfire/intc-simr.c +++ b/arch/m68knommu/platform/coldfire/intc-simr.c @@ -3,7 +3,7 @@ * * Interrupt controller code for the ColdFire 5208, 5207 & 532x parts. * - * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> + * (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive @@ -20,47 +20,156 @@ #include <asm/mcfsim.h> #include <asm/traps.h> -static void intc_irq_mask(unsigned int irq) +/* + * The EDGE Port interrupts are the fixed 7 external interrupts. + * They need some special treatment, for example they need to be acked. + */ +#ifdef CONFIG_M520x +/* + * The 520x parts only support a limited range of these external + * interrupts, only 1, 4 and 7 (as interrupts 65, 66 and 67). 
+ */ +#define EINT0 64 /* Is not actually used, but spot reserved for it */ +#define EINT1 65 /* EDGE Port interrupt 1 */ +#define EINT4 66 /* EDGE Port interrupt 4 */ +#define EINT7 67 /* EDGE Port interrupt 7 */ + +static unsigned int irqebitmap[] = { 0, 1, 4, 7 }; +static unsigned int inline irq2ebit(unsigned int irq) { - if (irq >= MCFINT_VECBASE) { - if (irq < MCFINT_VECBASE + 64) - __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_SIMR); - else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_SIMR) - __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_SIMR); - } + return irqebitmap[irq - EINT0]; +} + +#else + +/* + * Most of the ColdFire parts with the EDGE Port module just have + * a strait direct mapping of the 7 external interrupts. Although + * there is a bit reserved for 0, it is not used. + */ +#define EINT0 64 /* Is not actually used, but spot reserved for it */ +#define EINT1 65 /* EDGE Port interrupt 1 */ +#define EINT7 71 /* EDGE Port interrupt 7 */ + +static unsigned int inline irq2ebit(unsigned int irq) +{ + return irq - EINT0; +} + +#endif + +/* + * There maybe one or two interrupt control units, each has 64 + * interrupts. If there is no second unit then MCFINTC1_* defines + * will be 0 (and code for them optimized away). + */ + +static void intc_irq_mask(struct irq_data *d) +{ + unsigned int irq = d->irq - MCFINT_VECBASE; + + if (MCFINTC1_SIMR && (irq > 64)) + __raw_writeb(irq - 64, MCFINTC1_SIMR); + else + __raw_writeb(irq, MCFINTC0_SIMR); } -static void intc_irq_unmask(unsigned int irq) +static void intc_irq_unmask(struct irq_data *d) { - if (irq >= MCFINT_VECBASE) { - if (irq < MCFINT_VECBASE + 64) - __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_CIMR); - else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_CIMR) - __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_CIMR); + unsigned int irq = d->irq - MCFINT_VECBASE; + + if (MCFINTC1_CIMR && (irq > 64)) + __raw_writeb(irq - 64, MCFINTC1_CIMR); + else + __raw_writeb(irq, MCFINTC0_CIMR); +} + +static void intc_irq_ack(struct irq_data *d) +{ + unsigned int ebit = irq2ebit(d->irq); + + __raw_writeb(0x1 << ebit, MCFEPORT_EPFR); +} + +static unsigned int intc_irq_startup(struct irq_data *d) +{ + unsigned int irq = d->irq; + + if ((irq >= EINT1) && (irq <= EINT7)) { + unsigned int ebit = irq2ebit(irq); + u8 v; + + /* Set EPORT line as input */ + v = __raw_readb(MCFEPORT_EPDDR); + __raw_writeb(v & ~(0x1 << ebit), MCFEPORT_EPDDR); + + /* Set EPORT line as interrupt source */ + v = __raw_readb(MCFEPORT_EPIER); + __raw_writeb(v | (0x1 << ebit), MCFEPORT_EPIER); } + + irq -= MCFINT_VECBASE; + if (MCFINTC1_ICR0 && (irq > 64)) + __raw_writeb(5, MCFINTC1_ICR0 + irq - 64); + else + __raw_writeb(5, MCFINTC0_ICR0 + irq); + + + intc_irq_unmask(d); + return 0; } -static int intc_irq_set_type(unsigned int irq, unsigned int type) +static int intc_irq_set_type(struct irq_data *d, unsigned int type) { - if (irq >= MCFINT_VECBASE) { - if (irq < MCFINT_VECBASE + 64) - __raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE); - else if ((irq < MCFINT_VECBASE) && MCFINTC1_ICR0) - __raw_writeb(5, MCFINTC1_ICR0 + irq - MCFINT_VECBASE - 64); + unsigned int ebit, irq = d->irq; + u16 pa, tb; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + tb = 0x1; + break; + case IRQ_TYPE_EDGE_FALLING: + tb = 0x2; + break; + case IRQ_TYPE_EDGE_BOTH: + tb = 0x3; + break; + default: + /* Level triggered */ + tb = 0; + break; } + + if (tb) + set_irq_handler(irq, handle_edge_irq); + + ebit = irq2ebit(irq) * 2; + pa = __raw_readw(MCFEPORT_EPPAR); + pa = (pa & ~(0x3 << ebit)) | (tb << 
ebit); + __raw_writew(pa, MCFEPORT_EPPAR); + return 0; } static struct irq_chip intc_irq_chip = { .name = "CF-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, - .set_type = intc_irq_set_type, + .irq_startup = intc_irq_startup, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, +}; + +static struct irq_chip intc_irq_chip_edge_port = { + .name = "CF-INTC-EP", + .irq_startup = intc_irq_startup, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, + .irq_ack = intc_irq_ack, + .irq_set_type = intc_irq_set_type, }; void __init init_IRQ(void) { - int irq; + int irq, eirq; init_vectors(); @@ -69,8 +178,12 @@ void __init init_IRQ(void) if (MCFINTC1_SIMR) __raw_writeb(0xff, MCFINTC1_SIMR); - for (irq = 0; (irq < NR_IRQS); irq++) { - set_irq_chip(irq, &intc_irq_chip); + eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0); + for (irq = MCFINT_VECBASE; (irq < eirq); irq++) { + if ((irq >= EINT1) && (irq <= EINT7)) + set_irq_chip(irq, &intc_irq_chip_edge_port); + else + set_irq_chip(irq, &intc_irq_chip); set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); set_irq_handler(irq, handle_level_irq); } diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c index 60d2fcbe182b..d648081a63f6 100644 --- a/arch/m68knommu/platform/coldfire/intc.c +++ b/arch/m68knommu/platform/coldfire/intc.c @@ -111,28 +111,28 @@ void mcf_autovector(int irq) #endif } -static void intc_irq_mask(unsigned int irq) +static void intc_irq_mask(struct irq_data *d) { - if (mcf_irq2imr[irq]) - mcf_setimr(mcf_irq2imr[irq]); + if (mcf_irq2imr[d->irq]) + mcf_setimr(mcf_irq2imr[d->irq]); } -static void intc_irq_unmask(unsigned int irq) +static void intc_irq_unmask(struct irq_data *d) { - if (mcf_irq2imr[irq]) - mcf_clrimr(mcf_irq2imr[irq]); + if (mcf_irq2imr[d->irq]) + mcf_clrimr(mcf_irq2imr[d->irq]); } -static int intc_irq_set_type(unsigned int irq, unsigned int type) +static int intc_irq_set_type(struct irq_data *d, unsigned int type) { return 0; } static struct irq_chip intc_irq_chip = { .name = "CF-INTC", - .mask = intc_irq_mask, - .unmask = intc_irq_unmask, - .set_type = intc_irq_set_type, + .irq_mask = intc_irq_mask, + .irq_unmask = intc_irq_unmask, + .irq_set_type = intc_irq_set_type, }; void __init init_IRQ(void) diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index aebea19abd78..c2b980926bec 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c @@ -31,7 +31,7 @@ * By default use timer1 as the system clock timer. */ #define FREQ ((MCF_CLK / 2) / 64) -#define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a)) +#define TA(a) (MCFPIT_BASE1 + (a)) #define PIT_CYCLES_PER_JIFFY (FREQ / HZ) static u32 pit_cnt; diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c index 2304d736c701..60242f65fea9 100644 --- a/arch/m68knommu/platform/coldfire/timers.c +++ b/arch/m68knommu/platform/coldfire/timers.c @@ -28,7 +28,7 @@ * By default use timer1 as the system clock timer. */ #define FREQ (MCF_BUSCLK / 16) -#define TA(a) (MCF_MBAR + MCFTIMER_BASE1 + (a)) +#define TA(a) (MCFTIMER_BASE1 + (a)) /* * These provide the underlying interrupt vector support. @@ -126,7 +126,7 @@ void hw_timer_init(void) /* * By default use timer2 as the profiler clock timer. */ -#define PA(a) (MCF_MBAR + MCFTIMER_BASE2 + (a)) +#define PA(a) (MCFTIMER_BASE2 + (a)) /* * Choose a reasonably fast profile timer. 
Make it an odd value to diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 31680032053e..922c4194c7bb 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -17,6 +17,7 @@ config MICROBLAZE select OF_EARLY_FLATTREE select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE + select GENERIC_HARDIRQS_NO_DEPRECATED config SWAP def_bool n @@ -183,6 +184,17 @@ config LOWMEM_SIZE hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL default "0x30000000" +config MANUAL_RESET_VECTOR + hex "Microblaze reset vector address setup" + default "0x0" + help + Set this option to have the kernel override the CPU Reset vector. + If zero, no change will be made to the MicroBlaze reset vector at + address 0x0. + If non-zero, a jump instruction to this address, will be written + to the reset vector at address 0x0. + If you are unsure, set it to default value 0x0. + config KERNEL_START_BOOL bool "Set custom kernel base address" depends on ADVANCED_OPTIONS @@ -247,7 +259,7 @@ endmenu source "mm/Kconfig" -menu "Exectuable file formats" +menu "Executable file formats" source "fs/Kconfig.binfmt" diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index 7ebd955460d9..0f553bc009a0 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h @@ -84,12 +84,13 @@ do { \ #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) - #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define flush_cache_mm(mm) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) + +#define flush_cache_page(vma, vmaddr, pfn) \ + flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE); /* MS: kgdb code use this macro, wrong len with FLASH */ #if 0 @@ -104,9 +105,13 @@ do { \ #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ u32 addr = virt_to_phys(dst); \ - invalidate_icache_range((unsigned) (addr), (unsigned) (addr) + (len));\ memcpy((dst), (src), (len)); \ - flush_dcache_range((unsigned) (addr), (unsigned) (addr) + (len));\ + if (vma->vm_flags & VM_EXEC) { \ + invalidate_icache_range((unsigned) (addr), \ + (unsigned) (addr) + PAGE_SIZE); \ + flush_dcache_range((unsigned) (addr), \ + (unsigned) (addr) + PAGE_SIZE); \ + } \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h index cd257537ae54..d8f013347a9e 100644 --- a/arch/microblaze/include/asm/cpuinfo.h +++ b/arch/microblaze/include/asm/cpuinfo.h @@ -96,8 +96,8 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu); static inline unsigned int fcpu(struct device_node *cpu, char *n) { - int *val; - return (val = (int *) of_get_property(cpu, n, NULL)) ? + const __be32 *val; + return (val = of_get_property(cpu, n, NULL)) ? 
be32_to_cpup(val) : 0; } diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h index ec89f2ad0fe1..af0144b91b79 100644 --- a/arch/microblaze/include/asm/entry.h +++ b/arch/microblaze/include/asm/entry.h @@ -31,40 +31,4 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ # endif /* __ASSEMBLY__ */ -#ifndef CONFIG_MMU - -/* noMMU hasn't any space for args */ -# define STATE_SAVE_ARG_SPACE (0) - -#else /* CONFIG_MMU */ - -/* If true, system calls save and restore all registers (except result - * registers, of course). If false, then `call clobbered' registers - * will not be preserved, on the theory that system calls are basically - * function calls anyway, and the caller should be able to deal with it. - * This is a security risk, of course, as `internal' values may leak out - * after a system call, but that certainly doesn't matter very much for - * a processor with no MMU protection! For a protected-mode kernel, it - * would be faster to just zero those registers before returning. - * - * I can not rely on the glibc implementation. If you turn it off make - * sure that r11/r12 is saved in user-space. --KAA - * - * These are special variables using by the kernel trap/interrupt code - * to save registers in, at a time when there are no spare registers we - * can use to do so, and we can't depend on the value of the stack - * pointer. This means that they must be within a signed 16-bit - * displacement of 0x00000000. - */ - -/* A `state save frame' is a struct pt_regs preceded by some extra space - * suitable for a function call stack frame. */ - -/* Amount of room on the stack reserved for arguments and to satisfy the - * C calling conventions, in addition to the space used by the struct - * pt_regs that actually holds saved values. 
*/ -#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */ - -#endif /* CONFIG_MMU */ - #endif /* _ASM_MICROBLAZE_ENTRY_H */ diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h index 6479097b802b..e6a8ddea1dca 100644 --- a/arch/microblaze/include/asm/exceptions.h +++ b/arch/microblaze/include/asm/exceptions.h @@ -66,6 +66,9 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, int fsr, int addr); +asmlinkage void sw_exception(struct pt_regs *regs); +void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig); + void die(const char *str, struct pt_regs *fp, long err); void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index ad3fd61b2fe7..b0526d2716fa 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -29,7 +29,7 @@ }) static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - __asm__ __volatile__ ("1: lwx %0, %2, r0; \ - cmp %1, %0, %3; \ - beqi %1, 3f; \ - 2: swx %4, %2, r0; \ - addic %1, r0, 0; \ - bnei %1, 1b; \ + __asm__ __volatile__ ("1: lwx %1, %3, r0; \ + cmp %2, %1, %4; \ + beqi %2, 3f; \ + 2: swx %5, %3, r0; \ + addic %2, r0, 0; \ + bnei %2, 1b; \ 3: \ .section .fixup,\"ax\"; \ 4: brid 3b; \ - addik %0, r0, %5; \ + addik %0, r0, %6; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b,2b,4b; \ .previous;" \ - : "=&r" (prev), "=&r"(cmp) \ + : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h index ec5583d6111c..cc54187f3d38 100644 --- a/arch/microblaze/include/asm/irq.h +++ b/arch/microblaze/include/asm/irq.h @@ -12,8 +12,6 @@ #define NR_IRQS 32 #include <asm-generic/irq.h> -#include <linux/interrupt.h> - /* This type is the placeholder for a hardware interrupt number. It has to * be big enough to enclose whatever representation is used by a given * platform. 
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h index 0c68764ab547..10717669e0c2 100644 --- a/arch/microblaze/include/asm/pci-bridge.h +++ b/arch/microblaze/include/asm/pci-bridge.h @@ -104,11 +104,22 @@ struct pci_controller { int global_number; /* PCI domain number */ }; +#ifdef CONFIG_PCI static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) { return bus->sysdata; } +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + struct pci_controller *host; + + if (bus->self) + return pci_device_to_OF_node(bus->self); + host = pci_bus_to_host(bus); + return host ? host->dn : NULL; +} + static inline int isa_vaddr_is_ioport(void __iomem *address) { /* No specific ISA handling on ppc32 at this stage, it @@ -116,6 +127,7 @@ static inline int isa_vaddr_is_ioport(void __iomem *address) */ return 0; } +#endif /* CONFIG_PCI */ /* These are used for config access before all the PCI probing has been done. */ diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 885574a73f01..b2af42311a12 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -572,7 +572,7 @@ void __init *early_get_page(void); extern unsigned long ioremap_bot, ioremap_base; -void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); +void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); void consistent_free(size_t size, void *vaddr); void consistent_sync(void *vaddr, size_t size, int direction); void consistent_sync_page(struct page *page, unsigned long offset, diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 8eeb09211ece..aed2a6be8e27 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h @@ -155,7 +155,7 @@ unsigned long get_wchan(struct task_struct *p); # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) # define task_pt_regs_plus_args(tsk) \ - (((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE) + ((void *)task_pt_regs(tsk)) # define task_sp(task) (task_regs(task)->r1) # define task_pc(task) (task_regs(task)->pc) diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 2e72af078b05..d0890d36ef61 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -64,21 +64,6 @@ extern void kdump_move_device_tree(void); /* CPU OF node matching */ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); -/** - * of_irq_map_pci - Resolve the interrupt for a PCI device - * @pdev: the device whose interrupt is to be resolved - * @out_irq: structure of_irq filled by this function - * - * This function resolves the PCI interrupt for a given PCI device. If a - * device-node exists for a given pci_dev, it will use normal OF tree - * walking. If not, it will implement standard swizzling and walk up the - * PCI tree until an device-node is found, at which point it will finish - * resolving using the OF tree walking. 
- */ -struct pci_dev; -struct of_irq; -extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h index d74dbfb92c04..d9b66304d5dd 100644 --- a/arch/microblaze/include/asm/ptrace.h +++ b/arch/microblaze/include/asm/ptrace.h @@ -66,13 +66,13 @@ void show_regs(struct pt_regs *); #else /* __KERNEL__ */ /* pt_regs offsets used by gdbserver etc in ptrace syscalls */ -#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) -#define PT_PC (32 * sizeof(microblaze_reg_t)) -#define PT_MSR (33 * sizeof(microblaze_reg_t)) -#define PT_EAR (34 * sizeof(microblaze_reg_t)) -#define PT_ESR (35 * sizeof(microblaze_reg_t)) -#define PT_FSR (36 * sizeof(microblaze_reg_t)) -#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t)) +#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) +#define PT_PC (32 * sizeof(microblaze_reg_t)) +#define PT_MSR (33 * sizeof(microblaze_reg_t)) +#define PT_EAR (34 * sizeof(microblaze_reg_t)) +#define PT_ESR (35 * sizeof(microblaze_reg_t)) +#define PT_FSR (36 * sizeof(microblaze_reg_t)) +#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t)) #endif /* __KERNEL */ diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 048dfcd8d89d..9bc431783105 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h @@ -96,4 +96,7 @@ static inline void syscall_set_arguments(struct task_struct *task, microblaze_set_syscall_arg(regs, i++, *args++); } +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); + #endif /* __ASM_MICROBLAZE_SYSCALL_H */ diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h index 720761cc741f..27f2f4c0f39f 100644 --- a/arch/microblaze/include/asm/syscalls.h +++ b/arch/microblaze/include/asm/syscalls.h @@ -1,5 +1,13 @@ #ifndef __ASM_MICROBLAZE_SYSCALLS_H +asmlinkage long microblaze_vfork(struct pt_regs *regs); +asmlinkage long microblaze_clone(int flags, unsigned long stack, + struct pt_regs *regs); +asmlinkage long microblaze_execve(const char __user *filenamei, + const char __user *const __user *argv, + const char __user *const __user *envp, + struct pt_regs *regs); + asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); #define sys_clone sys_clone diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index d840f4a2d3c9..5bb95a11880d 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, { /* normal memset with two words to __ex_table */ __asm__ __volatile__ ( \ - "1: sb r0, %2, r0;" \ + "1: sb r0, %1, r0;" \ " addik %0, %0, -1;" \ " bneid %0, 1b;" \ - " addik %2, %2, 1;" \ + " addik %1, %1, 1;" \ "2: " \ __EX_TABLE_SECTION \ ".word 1b,2b;" \ ".previous;" \ - : "=r"(n) \ - : "0"(n), "r"(to) + : "=r"(n), "=r"(to) \ + : "0"(n), "1"(to) ); return n; } diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h index 2b97cbe500e9..b162ed880495 100644 --- a/arch/microblaze/include/asm/unaligned.h +++ b/arch/microblaze/include/asm/unaligned.h @@ -12,18 +12,19 @@ # ifdef __KERNEL__ -# include <linux/unaligned/be_byteshift.h> -# include <linux/unaligned/le_byteshift.h> -# include <linux/unaligned/generic.h> - - # ifdef 
__MICROBLAZEEL__ +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> # define get_unaligned __get_unaligned_le # define put_unaligned __put_unaligned_le # else +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be # endif +# include <linux/unaligned/generic.h> + # endif /* __KERNEL__ */ #endif /* _ASM_MICROBLAZE_UNALIGNED_H */ diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 109876e8d643..cf0afd90a2c0 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -519,7 +519,7 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end) struct scache *mbc; /* new wb cache model */ -const struct scache wb_msr = { +static const struct scache wb_msr = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_noirq, @@ -535,7 +535,7 @@ const struct scache wb_msr = { }; /* There is only difference in ie, id, de, dd functions */ -const struct scache wb_nomsr = { +static const struct scache wb_nomsr = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_noirq, @@ -551,7 +551,7 @@ const struct scache wb_nomsr = { }; /* Old wt cache model with disabling irq and turn off cache */ -const struct scache wt_msr = { +static const struct scache wt_msr = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_msr_irq, @@ -566,7 +566,7 @@ const struct scache wt_msr = { .dinr = __invalidate_dcache_range_msr_irq_wt, }; -const struct scache wt_nomsr = { +static const struct scache wt_nomsr = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_nomsr_irq, @@ -582,7 +582,7 @@ const struct scache wt_nomsr = { }; /* New wt cache model for newer Microblaze versions */ -const struct scache wt_msr_noirq = { +static const struct scache wt_msr_noirq = { .ie = __enable_icache_msr, .id = __disable_icache_msr, .ifl = __flush_icache_all_noirq, @@ -597,7 +597,7 @@ const struct scache wt_msr_noirq = { .dinr = __invalidate_dcache_range_nomsr_wt, }; -const struct scache wt_nomsr_noirq = { +static const struct scache wt_nomsr_noirq = { .ie = __enable_icache_nomsr, .id = __disable_icache_nomsr, .ifl = __flush_icache_all_noirq, @@ -624,7 +624,7 @@ void microblaze_cache_init(void) if (cpuinfo.dcache_wb) { INFO("wb_msr"); mbc = (struct scache *)&wb_msr; - if (cpuinfo.ver_code < CPUVER_7_20_D) { + if (cpuinfo.ver_code <= CPUVER_7_20_D) { /* MS: problem with signal handling - hw bug */ INFO("WB won't work properly"); } @@ -641,7 +641,7 @@ void microblaze_cache_init(void) if (cpuinfo.dcache_wb) { INFO("wb_nomsr"); mbc = (struct scache *)&wb_nomsr; - if (cpuinfo.ver_code < CPUVER_7_20_D) { + if (cpuinfo.ver_code <= CPUVER_7_20_D) { /* MS: problem with signal handling - hw bug */ INFO("WB won't work properly"); } diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 2c309fccf230..c1640c52711f 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -33,6 +33,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"7.30.b", 0x11}, {"8.00.a", 0x12}, {"8.00.b", 0x13}, + {"8.10.a", 0x14}, {NULL, 0}, }; diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 79c74659f204..393e6b2db688 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -26,6 +26,7 @@ static inline 
void __dma_sync_page(unsigned long paddr, unsigned long offset, { switch (direction) { case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: flush_dcache_range(paddr + offset, paddr + offset + size); break; case DMA_FROM_DEVICE: diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index ca84368570b6..34b526f59b43 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S @@ -115,7 +115,7 @@ ENTRY(_interrupt) /* restore r31 */ lwi r31, r0, PER_CPU(CURRENT_SAVE) /* prepare the link register, the argument and jump */ - la r15, r0, ret_from_intr - 8 + addik r15, r0, ret_from_intr - 8 addk r6, r0, r15 braid do_IRQ add r5, r0, r1 @@ -283,7 +283,7 @@ ENTRY(_user_exception) add r12, r12, r12 /* convert num -> ptr */ add r12, r12, r12 lwi r12, r12, sys_call_table /* Get function pointer */ - la r15, r0, ret_to_user-8 /* set return address */ + addik r15, r0, ret_to_user-8 /* set return address */ bra r12 /* Make the system call. */ bri 0 /* won't reach here */ 1: diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 41c30cdb2704..ca15bc5c7449 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -33,11 +33,14 @@ #undef DEBUG -/* The size of a state save frame. */ -#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) - -/* The offset of the struct pt_regs in a `state save frame' on the stack. */ -#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */ +#ifdef DEBUG +/* Create space for syscalls counting. */ +.section .data +.global syscall_debug_table +.align 4 +syscall_debug_table: + .space (__NR_syscalls * 4) +#endif /* DEBUG */ #define C_ENTRY(name) .globl name; .align 4; name @@ -172,72 +175,72 @@ 1: #define SAVE_REGS \ - swi r2, r1, PTO+PT_R2; /* Save SDA */ \ - swi r3, r1, PTO+PT_R3; \ - swi r4, r1, PTO+PT_R4; \ - swi r5, r1, PTO+PT_R5; \ - swi r6, r1, PTO+PT_R6; \ - swi r7, r1, PTO+PT_R7; \ - swi r8, r1, PTO+PT_R8; \ - swi r9, r1, PTO+PT_R9; \ - swi r10, r1, PTO+PT_R10; \ - swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\ - swi r12, r1, PTO+PT_R12; \ - swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \ - swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \ - swi r15, r1, PTO+PT_R15; /* Save LP */ \ - swi r16, r1, PTO+PT_R16; \ - swi r17, r1, PTO+PT_R17; \ - swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \ - swi r19, r1, PTO+PT_R19; \ - swi r20, r1, PTO+PT_R20; \ - swi r21, r1, PTO+PT_R21; \ - swi r22, r1, PTO+PT_R22; \ - swi r23, r1, PTO+PT_R23; \ - swi r24, r1, PTO+PT_R24; \ - swi r25, r1, PTO+PT_R25; \ - swi r26, r1, PTO+PT_R26; \ - swi r27, r1, PTO+PT_R27; \ - swi r28, r1, PTO+PT_R28; \ - swi r29, r1, PTO+PT_R29; \ - swi r30, r1, PTO+PT_R30; \ - swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ + swi r2, r1, PT_R2; /* Save SDA */ \ + swi r3, r1, PT_R3; \ + swi r4, r1, PT_R4; \ + swi r5, r1, PT_R5; \ + swi r6, r1, PT_R6; \ + swi r7, r1, PT_R7; \ + swi r8, r1, PT_R8; \ + swi r9, r1, PT_R9; \ + swi r10, r1, PT_R10; \ + swi r11, r1, PT_R11; /* save clobbered regs after rval */\ + swi r12, r1, PT_R12; \ + swi r13, r1, PT_R13; /* Save SDA2 */ \ + swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \ + swi r15, r1, PT_R15; /* Save LP */ \ + swi r16, r1, PT_R16; \ + swi r17, r1, PT_R17; \ + swi r18, r1, PT_R18; /* Save asm scratch reg */ \ + swi r19, r1, PT_R19; \ + swi r20, r1, PT_R20; \ + swi r21, r1, PT_R21; \ + swi r22, r1, PT_R22; \ + swi r23, r1, PT_R23; \ + swi r24, r1, PT_R24; \ + swi r25, r1, PT_R25; \ + swi r26, r1, PT_R26; \ + swi r27, r1, PT_R27; \ 
+ swi r28, r1, PT_R28; \ + swi r29, r1, PT_R29; \ + swi r30, r1, PT_R30; \ + swi r31, r1, PT_R31; /* Save current task reg */ \ mfs r11, rmsr; /* save MSR */ \ - swi r11, r1, PTO+PT_MSR; + swi r11, r1, PT_MSR; #define RESTORE_REGS \ - lwi r11, r1, PTO+PT_MSR; \ + lwi r11, r1, PT_MSR; \ mts rmsr , r11; \ - lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ - lwi r3, r1, PTO+PT_R3; \ - lwi r4, r1, PTO+PT_R4; \ - lwi r5, r1, PTO+PT_R5; \ - lwi r6, r1, PTO+PT_R6; \ - lwi r7, r1, PTO+PT_R7; \ - lwi r8, r1, PTO+PT_R8; \ - lwi r9, r1, PTO+PT_R9; \ - lwi r10, r1, PTO+PT_R10; \ - lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\ - lwi r12, r1, PTO+PT_R12; \ - lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \ - lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ - lwi r15, r1, PTO+PT_R15; /* restore LP */ \ - lwi r16, r1, PTO+PT_R16; \ - lwi r17, r1, PTO+PT_R17; \ - lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \ - lwi r19, r1, PTO+PT_R19; \ - lwi r20, r1, PTO+PT_R20; \ - lwi r21, r1, PTO+PT_R21; \ - lwi r22, r1, PTO+PT_R22; \ - lwi r23, r1, PTO+PT_R23; \ - lwi r24, r1, PTO+PT_R24; \ - lwi r25, r1, PTO+PT_R25; \ - lwi r26, r1, PTO+PT_R26; \ - lwi r27, r1, PTO+PT_R27; \ - lwi r28, r1, PTO+PT_R28; \ - lwi r29, r1, PTO+PT_R29; \ - lwi r30, r1, PTO+PT_R30; \ - lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ + lwi r2, r1, PT_R2; /* restore SDA */ \ + lwi r3, r1, PT_R3; \ + lwi r4, r1, PT_R4; \ + lwi r5, r1, PT_R5; \ + lwi r6, r1, PT_R6; \ + lwi r7, r1, PT_R7; \ + lwi r8, r1, PT_R8; \ + lwi r9, r1, PT_R9; \ + lwi r10, r1, PT_R10; \ + lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\ + lwi r12, r1, PT_R12; \ + lwi r13, r1, PT_R13; /* restore SDA2 */ \ + lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ + lwi r15, r1, PT_R15; /* restore LP */ \ + lwi r16, r1, PT_R16; \ + lwi r17, r1, PT_R17; \ + lwi r18, r1, PT_R18; /* restore asm scratch reg */ \ + lwi r19, r1, PT_R19; \ + lwi r20, r1, PT_R20; \ + lwi r21, r1, PT_R21; \ + lwi r22, r1, PT_R22; \ + lwi r23, r1, PT_R23; \ + lwi r24, r1, PT_R24; \ + lwi r25, r1, PT_R25; \ + lwi r26, r1, PT_R26; \ + lwi r27, r1, PT_R27; \ + lwi r28, r1, PT_R28; \ + lwi r29, r1, PT_R29; \ + lwi r30, r1, PT_R30; \ + lwi r31, r1, PT_R31; /* Restore cur task reg */ #define SAVE_STATE \ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ @@ -250,11 +253,11 @@ lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ /* FIXME: I can add these two lines to one */ \ /* tophys(r1,r1); */ \ - /* addik r1, r1, -STATE_SAVE_SIZE; */ \ - addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ + /* addik r1, r1, -PT_SIZE; */ \ + addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ SAVE_REGS \ brid 2f; \ - swi r1, r1, PTO+PT_MODE; \ + swi r1, r1, PT_MODE; \ 1: /* User-mode state save. */ \ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ tophys(r1,r1); \ @@ -262,12 +265,12 @@ /* MS these three instructions can be added to one */ \ /* addik r1, r1, THREAD_SIZE; */ \ /* tophys(r1,r1); */ \ - /* addik r1, r1, -STATE_SAVE_SIZE; */ \ - addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ + /* addik r1, r1, -PT_SIZE; */ \ + addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ SAVE_REGS \ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ - swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ - swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \ + swi r11, r1, PT_R1; /* Store user SP. */ \ + swi r0, r1, PT_MODE; /* Was in user-mode. 
*/ \ /* MS: I am clearing UMS even in case when I come from kernel space */ \ clear_ums; \ 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); @@ -299,10 +302,10 @@ C_ENTRY(_user_exception): lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ tophys(r1,r1); - addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ + addik r1, r1, -PT_SIZE; /* Make room on the stack. */ SAVE_REGS - swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */ + swi r1, r1, PT_MODE; /* pt_regs -> kernel mode */ brid 2f; nop; /* Fill delay slot */ @@ -315,18 +318,18 @@ C_ENTRY(_user_exception): addik r1, r1, THREAD_SIZE; tophys(r1,r1); - addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ + addik r1, r1, -PT_SIZE; /* Make room on the stack. */ SAVE_REGS - swi r0, r1, PTO + PT_R3 - swi r0, r1, PTO + PT_R4 + swi r0, r1, PT_R3 + swi r0, r1, PT_R4 - swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ + swi r0, r1, PT_MODE; /* Was in user-mode. */ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); - swi r11, r1, PTO+PT_R1; /* Store user SP. */ + swi r11, r1, PT_R1; /* Store user SP. */ clear_ums; 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* Save away the syscall number. */ - swi r12, r1, PTO+PT_R0; + swi r12, r1, PT_R0; tovirt(r1,r1) /* where the trap should return need -8 to adjust for rtsd r15, 8*/ @@ -345,18 +348,18 @@ C_ENTRY(_user_exception): beqi r11, 4f addik r3, r0, -ENOSYS - swi r3, r1, PTO + PT_R3 + swi r3, r1, PT_R3 brlid r15, do_syscall_trace_enter - addik r5, r1, PTO + PT_R0 + addik r5, r1, PT_R0 # do_syscall_trace_enter returns the new syscall nr. addk r12, r0, r3 - lwi r5, r1, PTO+PT_R5; - lwi r6, r1, PTO+PT_R6; - lwi r7, r1, PTO+PT_R7; - lwi r8, r1, PTO+PT_R8; - lwi r9, r1, PTO+PT_R9; - lwi r10, r1, PTO+PT_R10; + lwi r5, r1, PT_R5; + lwi r6, r1, PT_R6; + lwi r7, r1, PT_R7; + lwi r8, r1, PT_R8; + lwi r9, r1, PT_R9; + lwi r10, r1, PT_R10; 4: /* Jump to the appropriate function for the system call number in r12 * (r12 is not preserved), or return an error if r12 is not valid. @@ -371,10 +374,14 @@ C_ENTRY(_user_exception): add r12, r12, r12; #ifdef DEBUG - /* Trac syscalls and stored them to r0_ram */ - lwi r3, r12, 0x400 + r0_ram + /* Trac syscalls and stored them to syscall_debug_table */ + /* The first syscall location stores total syscall number */ + lwi r3, r0, syscall_debug_table + addi r3, r3, 1 + swi r3, r0, syscall_debug_table + lwi r3, r12, syscall_debug_table addi r3, r3, 1 - swi r3, r12, 0x400 + r0_ram + swi r3, r12, syscall_debug_table #endif # Find and jump into the syscall handler. @@ -391,10 +398,10 @@ C_ENTRY(_user_exception): /* Entry point used to return from a syscall/trap */ /* We re-enable BIP bit before state restore */ C_ENTRY(ret_from_trap): - swi r3, r1, PTO + PT_R3 - swi r4, r1, PTO + PT_R4 + swi r3, r1, PT_R3 + swi r4, r1, PT_R4 - lwi r11, r1, PTO + PT_MODE; + lwi r11, r1, PT_MODE; /* See if returning to kernel mode, if so, skip resched &c. */ bnei r11, 2f; /* We're returning to user mode, so check for various conditions that @@ -406,7 +413,7 @@ C_ENTRY(ret_from_trap): beqi r11, 1f brlid r15, do_syscall_trace_leave - addik r5, r1, PTO + PT_R0 + addik r5, r1, PT_R0 1: /* We're returning to user mode, so check for various conditions that * trigger rescheduling. 
*/ @@ -426,7 +433,7 @@ C_ENTRY(ret_from_trap): andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ - addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ addi r7, r0, 1; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ add r6, r0, r0; /* Arg 2: sigset_t *oldset */ @@ -437,7 +444,7 @@ C_ENTRY(ret_from_trap): VM_OFF; tophys(r1,r1); RESTORE_REGS; - addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ + addik r1, r1, PT_SIZE /* Clean up stack space. */ lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ bri 6f; @@ -446,7 +453,7 @@ C_ENTRY(ret_from_trap): VM_OFF; tophys(r1,r1); RESTORE_REGS; - addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ + addik r1, r1, PT_SIZE /* Clean up stack space. */ tovirt(r1,r1); 6: TRAP_return: /* Make global symbol for debugging */ @@ -459,8 +466,8 @@ TRAP_return: /* Make global symbol for debugging */ C_ENTRY(sys_fork_wrapper): addi r5, r0, SIGCHLD /* Arg 0: flags */ - lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ - addik r7, r1, PTO /* Arg 2: parent context */ + lwi r6, r1, PT_R1 /* Arg 1: child SP (use parent's) */ + addik r7, r1, 0 /* Arg 2: parent context */ add r8. r0, r0 /* Arg 3: (unused) */ add r9, r0, r0; /* Arg 4: (unused) */ brid do_fork /* Do real work (tail-call) */ @@ -480,12 +487,12 @@ C_ENTRY(ret_from_fork): C_ENTRY(sys_vfork): brid microblaze_vfork /* Do real work (tail-call) */ - addik r5, r1, PTO + addik r5, r1, 0 C_ENTRY(sys_clone): bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ - lwi r6, r1, PTO + PT_R1; /* If so, use paret's stack ptr */ -1: addik r7, r1, PTO; /* Arg 2: parent context */ + lwi r6, r1, PT_R1; /* If so, use paret's stack ptr */ +1: addik r7, r1, 0; /* Arg 2: parent context */ add r8, r0, r0; /* Arg 3: (unused) */ add r9, r0, r0; /* Arg 4: (unused) */ brid do_fork /* Do real work (tail-call) */ @@ -493,11 +500,11 @@ C_ENTRY(sys_clone): C_ENTRY(sys_execve): brid microblaze_execve; /* Do real work (tail-call).*/ - addik r8, r1, PTO; /* add user context as 4th arg */ + addik r8, r1, 0; /* add user context as 4th arg */ C_ENTRY(sys_rt_sigreturn_wrapper): brid sys_rt_sigreturn /* Do real work */ - addik r5, r1, PTO; /* add user context as 1st arg */ + addik r5, r1, 0; /* add user context as 1st arg */ /* * HW EXCEPTION rutine start @@ -508,7 +515,7 @@ C_ENTRY(full_exception_trap): addik r17, r17, -4 SAVE_STATE /* Save registers */ /* PC, before IRQ/trap - this is one instruction above */ - swi r17, r1, PTO+PT_PC; + swi r17, r1, PT_PC; tovirt(r1,r1) /* FIXME this can be store directly in PT_ESR reg. * I tested it but there is a fault */ @@ -518,7 +525,7 @@ C_ENTRY(full_exception_trap): mfs r7, rfsr; /* save FSR */ mts rfsr, r0; /* Clear sticky fsr */ rted r0, full_exception - addik r5, r1, PTO /* parameter struct pt_regs * regs */ + addik r5, r1, 0 /* parameter struct pt_regs * regs */ /* * Unaligned data trap. @@ -544,14 +551,14 @@ C_ENTRY(unaligned_data_trap): lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); SAVE_STATE /* Save registers.*/ /* PC, before IRQ/trap - this is one instruction above */ - swi r17, r1, PTO+PT_PC; + swi r17, r1, PT_PC; tovirt(r1,r1) /* where the trap should return need -8 to adjust for rtsd r15, 8 */ addik r15, r0, ret_from_exc-8 mfs r3, resr /* ESR */ mfs r4, rear /* EAR */ rtbd r0, _unaligned_data_exception - addik r7, r1, PTO /* parameter struct pt_regs * regs */ + addik r7, r1, 0 /* parameter struct pt_regs * regs */ /* * Page fault traps. 
@@ -574,30 +581,30 @@ C_ENTRY(unaligned_data_trap): C_ENTRY(page_fault_data_trap): SAVE_STATE /* Save registers.*/ /* PC, before IRQ/trap - this is one instruction above */ - swi r17, r1, PTO+PT_PC; + swi r17, r1, PT_PC; tovirt(r1,r1) /* where the trap should return need -8 to adjust for rtsd r15, 8 */ addik r15, r0, ret_from_exc-8 mfs r6, rear /* parameter unsigned long address */ mfs r7, resr /* parameter unsigned long error_code */ rted r0, do_page_fault - addik r5, r1, PTO /* parameter struct pt_regs * regs */ + addik r5, r1, 0 /* parameter struct pt_regs * regs */ C_ENTRY(page_fault_instr_trap): SAVE_STATE /* Save registers.*/ /* PC, before IRQ/trap - this is one instruction above */ - swi r17, r1, PTO+PT_PC; + swi r17, r1, PT_PC; tovirt(r1,r1) /* where the trap should return need -8 to adjust for rtsd r15, 8 */ addik r15, r0, ret_from_exc-8 mfs r6, rear /* parameter unsigned long address */ ori r7, r0, 0 /* parameter unsigned long error_code */ rted r0, do_page_fault - addik r5, r1, PTO /* parameter struct pt_regs * regs */ + addik r5, r1, 0 /* parameter struct pt_regs * regs */ /* Entry point used to return from an exception. */ C_ENTRY(ret_from_exc): - lwi r11, r1, PTO + PT_MODE; + lwi r11, r1, PT_MODE; bnei r11, 2f; /* See if returning to kernel mode, */ /* ... if so, skip resched &c. */ @@ -629,7 +636,7 @@ C_ENTRY(ret_from_exc): * complete register state. Here we save anything not saved by * the normal entry sequence, so that it may be safely restored * (in a possibly modified form) after do_signal returns. */ - addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ add r6, r0, r0; /* Arg 2: sigset_t *oldset */ @@ -641,7 +648,7 @@ C_ENTRY(ret_from_exc): tophys(r1,r1); RESTORE_REGS; - addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ + addik r1, r1, PT_SIZE /* Clean up stack space. */ lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ bri 6f; @@ -650,7 +657,7 @@ C_ENTRY(ret_from_exc): VM_OFF; tophys(r1,r1); RESTORE_REGS; - addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ + addik r1, r1, PT_SIZE /* Clean up stack space. */ tovirt(r1,r1); 6: @@ -683,10 +690,10 @@ C_ENTRY(_interrupt): tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ /* save registers */ /* MS: Make room on the stack -> activation record */ - addik r1, r1, -STATE_SAVE_SIZE; + addik r1, r1, -PT_SIZE; SAVE_REGS brid 2f; - swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */ + swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */ 1: /* User-mode state save. */ /* MS: get the saved current */ @@ -696,23 +703,23 @@ C_ENTRY(_interrupt): addik r1, r1, THREAD_SIZE; tophys(r1,r1); /* save registers */ - addik r1, r1, -STATE_SAVE_SIZE; + addik r1, r1, -PT_SIZE; SAVE_REGS /* calculate mode */ - swi r0, r1, PTO + PT_MODE; + swi r0, r1, PT_MODE; lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); - swi r11, r1, PTO+PT_R1; + swi r11, r1, PT_R1; clear_ums; 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); tovirt(r1,r1) addik r15, r0, irq_call; irq_call:rtbd r0, do_IRQ; - addik r5, r1, PTO; + addik r5, r1, 0; /* MS: we are in virtual mode */ ret_from_irq: - lwi r11, r1, PTO + PT_MODE; + lwi r11, r1, PT_MODE; bnei r11, 2f; lwi r11, CURRENT_TASK, TS_THREAD_INFO; @@ -729,7 +736,7 @@ ret_from_irq: beqid r11, no_intr_resched /* Handle a signal return; Pending signals should be in r18. 
*/ addi r7, r0, 0; /* Arg 3: int in_syscall */ - addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ bralid r15, do_signal; /* Handle any signals */ add r6, r0, r0; /* Arg 2: sigset_t *oldset */ @@ -741,7 +748,7 @@ no_intr_resched: VM_OFF; tophys(r1,r1); RESTORE_REGS - addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ + addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ lwi r1, r1, PT_R1 - PT_SIZE; bri 6f; /* MS: Return to kernel state. */ @@ -769,7 +776,7 @@ restore: VM_OFF /* MS: turn off MMU */ tophys(r1,r1) RESTORE_REGS - addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ + addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ tovirt(r1,r1); 6: IRQ_return: /* MS: Make global symbol for debugging */ @@ -792,29 +799,29 @@ C_ENTRY(_debug_exception): lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ /* BIP bit is set on entry, no interrupts can occur */ - addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; + addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; SAVE_REGS; /* save all regs to pt_reg structure */ - swi r0, r1, PTO+PT_R0; /* R0 must be saved too */ - swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */ - swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */ + swi r0, r1, PT_R0; /* R0 must be saved too */ + swi r14, r1, PT_R14 /* rewrite saved R14 value */ + swi r16, r1, PT_PC; /* PC and r16 are the same */ /* save special purpose registers to pt_regs */ mfs r11, rear; - swi r11, r1, PTO+PT_EAR; + swi r11, r1, PT_EAR; mfs r11, resr; - swi r11, r1, PTO+PT_ESR; + swi r11, r1, PT_ESR; mfs r11, rfsr; - swi r11, r1, PTO+PT_FSR; + swi r11, r1, PT_FSR; /* stack pointer is in physical address at it is decrease - * by STATE_SAVE_SIZE but we need to get correct R1 value */ - addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE; - swi r11, r1, PTO+PT_R1 + * by PT_SIZE but we need to get correct R1 value */ + addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE; + swi r11, r1, PT_R1 /* MS: r31 - current pointer isn't changed */ tovirt(r1,r1) #ifdef CONFIG_KGDB - addi r5, r1, PTO /* pass pt_reg address as the first arg */ - la r15, r0, dbtrap_call; /* return address */ + addi r5, r1, 0 /* pass pt_reg address as the first arg */ + addik r15, r0, dbtrap_call; /* return address */ rtbd r0, microblaze_kgdb_break nop; #endif @@ -829,16 +836,16 @@ C_ENTRY(_debug_exception): addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ tophys(r1,r1); - addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ + addik r1, r1, -PT_SIZE; /* Make room on the stack. */ SAVE_REGS; - swi r16, r1, PTO+PT_PC; /* Save LP */ - swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ + swi r16, r1, PT_PC; /* Save LP */ + swi r0, r1, PT_MODE; /* Was in user-mode. */ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); - swi r11, r1, PTO+PT_R1; /* Store user SP. */ + swi r11, r1, PT_R1; /* Store user SP. 
*/ lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); tovirt(r1,r1) set_vms; - addik r5, r1, PTO; + addik r5, r1, 0; addik r15, r0, dbtrap_call; dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ rtbd r0, sw_exception @@ -846,7 +853,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ /* MS: The first instruction for the second part of the gdb/kgdb */ set_bip; /* Ints masked for state restore */ - lwi r11, r1, PTO + PT_MODE; + lwi r11, r1, PT_MODE; bnei r11, 2f; /* MS: Return to user space - gdb */ /* Get current task ptr into r11 */ @@ -865,7 +872,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ - addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ + addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ add r6, r0, r0; /* Arg 2: sigset_t *oldset */ @@ -876,7 +883,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ tophys(r1,r1); /* MS: Restore all regs */ RESTORE_REGS - addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */ + addik r1, r1, PT_SIZE /* Clean up stack space */ lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ DBTRAP_return_user: /* MS: Make global symbol for debugging */ rtbd r16, 0; /* MS: Instructions to return from a debug trap */ @@ -887,9 +894,9 @@ DBTRAP_return_user: /* MS: Make global symbol for debugging */ tophys(r1,r1); /* MS: Restore all regs */ RESTORE_REGS - lwi r14, r1, PTO+PT_R14; - lwi r16, r1, PTO+PT_PC; - addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */ + lwi r14, r1, PT_R14; + lwi r16, r1, PT_PC; + addik r1, r1, PT_SIZE; /* MS: Clean up stack space */ tovirt(r1,r1); DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ rtbd r16, 0; /* MS: Instructions to return from a debug trap */ @@ -981,20 +988,22 @@ ENTRY(_switch_to) nop ENTRY(_reset) - brai 0x70; /* Jump back to FS-boot */ + brai 0; /* Jump to reset vector */ /* These are compiled and loaded into high memory, then * copied into place in mach_early_setup */ .section .init.ivt, "ax" +#if CONFIG_MANUAL_RESET_VECTOR .org 0x0 - /* this is very important - here is the reset vector */ - /* in current MMU branch you don't care what is here - it is - * used from bootloader site - but this is correct for FS-BOOT */ - brai 0x70 - nop + brai CONFIG_MANUAL_RESET_VECTOR +#endif + .org 0x8 brai TOPHYS(_user_exception); /* syscall handler */ + .org 0x10 brai TOPHYS(_interrupt); /* Interrupt handler */ + .org 0x18 brai TOPHYS(_debug_exception); /* debug trap handler */ + .org 0x20 brai TOPHYS(_hw_exception_handler); /* HW exception handler */ .section .rodata,"a" diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index a7fa6ae76d89..66fad2301221 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -50,7 +50,7 @@ void die(const char *str, struct pt_regs *fp, long err) } /* for user application debugging */ -void sw_exception(struct pt_regs *regs) +asmlinkage void sw_exception(struct pt_regs *regs) { _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); flush_dcache_range(regs->r16, regs->r16 + 0x4); diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 778a5ce2e4fc..77320b8fc16a 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -39,7 +39,7 @@ 
#include <asm/mmu.h> #include <asm/processor.h> -.data +.section .data .global empty_zero_page .align 12 empty_zero_page: @@ -50,6 +50,11 @@ swapper_pg_dir: #endif /* CONFIG_MMU */ +.section .rodata +.align 4 +endian_check: + .word 1 + __HEAD ENTRY(_start) #if CONFIG_KERNEL_BASE_ADDR == 0 @@ -79,10 +84,7 @@ real_start: /* Does r7 point to a valid FDT? Load HEADER magic number */ /* Run time Big/Little endian platform */ /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */ - addik r11, r0, 0x1 /* BIG/LITTLE checking value */ - /* __bss_start will be zeroed later - it is just temp location */ - swi r11, r0, TOPHYS(__bss_start) - lbui r11, r0, TOPHYS(__bss_start) + lbui r11, r0, TOPHYS(endian_check) beqid r11, big_endian /* DO NOT break delay stop dependency */ lw r11, r0, r7 /* Big endian load in delay slot */ lwr r11, r0, r7 /* Little endian load */ @@ -222,26 +224,26 @@ start_here: #endif /* CONFIG_MMU */ /* Initialize small data anchors */ - la r13, r0, _KERNEL_SDA_BASE_ - la r2, r0, _KERNEL_SDA2_BASE_ + addik r13, r0, _KERNEL_SDA_BASE_ + addik r2, r0, _KERNEL_SDA2_BASE_ /* Initialize stack pointer */ - la r1, r0, init_thread_union + THREAD_SIZE - 4 + addik r1, r0, init_thread_union + THREAD_SIZE - 4 /* Initialize r31 with current task address */ - la r31, r0, init_task + addik r31, r0, init_task /* * Call platform dependent initialize function. * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for * the function. */ - la r9, r0, machine_early_init + addik r9, r0, machine_early_init brald r15, r9 nop #ifndef CONFIG_MMU - la r15, r0, machine_halt + addik r15, r0, machine_halt braid start_kernel nop #else diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 782680de3121..56572e923a83 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -77,6 +77,8 @@ #include <asm/signal.h> #include <asm/asm-offsets.h> +#undef DEBUG + /* Helpful Macros */ #define NUM_TO_REG(num) r ## num @@ -91,7 +93,7 @@ lwi r6, r1, PT_R6; \ lwi r11, r1, PT_R11; \ lwi r31, r1, PT_R31; \ - lwi r1, r0, TOPHYS(r0_ram + 0); + lwi r1, r1, PT_R1; #endif /* CONFIG_MMU */ #define LWREG_NOP \ @@ -206,8 +208,8 @@ * | . | * | . | * - * NO_MMU kernel use the same r0_ram pointed space - look to vmlinux.lds.S - * which is used for storing register values - old style was, that value were + * MMU kernel uses the same 'pt_pool_space' pointed space + * which is used for storing register values - noMMu style was, that values were * stored in stack but in case of failure you lost information about register. * Currently you can see register value in memory in specific place. * In compare to with previous solution the speed should be the same. @@ -226,8 +228,22 @@ */ /* wrappers to restore state before coming to entry.S */ - #ifdef CONFIG_MMU +.section .data +.align 4 +pt_pool_space: + .space PT_SIZE + +#ifdef DEBUG +/* Create space for exception counting. */ +.section .data +.global exception_debug_table +.align 4 +exception_debug_table: + /* Look at exception vector table. There is 32 exceptions * word size */ + .space (32 * 4) +#endif /* DEBUG */ + .section .rodata .align 4 _MB_HW_ExceptionVectorTable: @@ -287,10 +303,10 @@ _hw_exception_handler: #ifndef CONFIG_MMU addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ #else - swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */ + swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */ /* Save date to kernel memory. 
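The head.S hunk above keeps the run-time endianness probe (store the word 1, read back one byte) but sources it from a constant endian_check word in .rodata instead of scribbling a temporary value into __bss_start. The same probe is shown here as plain user-space C for clarity; this is an explanatory sketch, not kernel code.

#include <stdint.h>
#include <stdio.h>

static const uint32_t endian_check = 1;

int main(void)
{
	/* First byte reads 1 on little-endian, 0 on big-endian. */
	if (*(const uint8_t *)&endian_check)
		printf("little endian\n");
	else
		printf("big endian\n");
	return 0;
}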
Here is the problem * when you came from user space */ - ori r1, r0, TOPHYS(r0_ram + 28); + ori r1, r0, TOPHYS(pt_pool_space); #endif swi r3, r1, PT_R3 swi r4, r1, PT_R4 @@ -329,12 +345,12 @@ not_in_delay_slot: #ifdef DEBUG /* counting which exception happen */ - lwi r5, r0, 0x200 + TOPHYS(r0_ram) + lwi r5, r0, TOPHYS(exception_debug_table) addi r5, r5, 1 - swi r5, r0, 0x200 + TOPHYS(r0_ram) - lwi r5, r6, 0x200 + TOPHYS(r0_ram) + swi r5, r0, TOPHYS(exception_debug_table) + lwi r5, r6, TOPHYS(exception_debug_table) addi r5, r5, 1 - swi r5, r6, 0x200 + TOPHYS(r0_ram) + swi r5, r6, TOPHYS(exception_debug_table) #endif /* end */ /* Load the HW Exception vector */ @@ -474,7 +490,7 @@ ex_lw_tail: /* Get the destination register number into r5 */ lbui r5, r0, TOPHYS(ex_reg_op); /* Form load_word jump table offset (lw_table + (8 * regnum)) */ - la r6, r0, TOPHYS(lw_table); + addik r6, r0, TOPHYS(lw_table); addk r5, r5, r5; addk r5, r5, r5; addk r5, r5, r5; @@ -485,7 +501,7 @@ ex_sw: /* Get the destination register number into r5 */ lbui r5, r0, TOPHYS(ex_reg_op); /* Form store_word jump table offset (sw_table + (8 * regnum)) */ - la r6, r0, TOPHYS(sw_table); + addik r6, r0, TOPHYS(sw_table); add r5, r5, r5; add r5, r5, r5; add r5, r5, r5; @@ -896,7 +912,7 @@ ex_lw_vm: beqid r6, ex_lhw_vm; load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ /* Load a word, byte-by-byte from destination address and save it in tmp space*/ - la r6, r0, ex_tmp_data_loc_0; + addik r6, r0, ex_tmp_data_loc_0; sbi r5, r6, 0; load2: lbui r5, r4, 1; sbi r5, r6, 1; @@ -910,7 +926,7 @@ load4: lbui r5, r4, 3; ex_lhw_vm: /* Load a half-word, byte-by-byte from destination address and * save it in tmp space */ - la r6, r0, ex_tmp_data_loc_0; + addik r6, r0, ex_tmp_data_loc_0; sbi r5, r6, 0; load5: lbui r5, r4, 1; sbi r5, r6, 1; @@ -926,7 +942,7 @@ ex_sw_vm: addik r5, r8, sw_table_vm; bra r5; ex_sw_tail_vm: - la r5, r0, ex_tmp_data_loc_0; + addik r5, r0, ex_tmp_data_loc_0; beqid r6, ex_shw_vm; swi r3, r5, 0; /* Get the word - delay slot */ /* Store the word, byte-by-byte into destination address */ @@ -969,7 +985,7 @@ ex_unaligned_fixup: addik r7, r0, SIGSEGV /* call bad_page_fault for finding aligned fixup, fixup address is saved * in PT_PC which is used as return address from exception */ - la r15, r0, ret_from_exc-8 /* setup return address */ + addik r15, r0, ret_from_exc-8 /* setup return address */ brid bad_page_fault nop diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index d61ea33aff7c..e4661285118e 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -40,59 +40,46 @@ unsigned int nr_irq; #define MER_ME (1<<0) #define MER_HIE (1<<1) -static void intc_enable_or_unmask(unsigned int irq) +static void intc_enable_or_unmask(struct irq_data *d) { - unsigned long mask = 1 << irq; - pr_debug("enable_or_unmask: %d\n", irq); + unsigned long mask = 1 << d->irq; + pr_debug("enable_or_unmask: %d\n", d->irq); out_be32(INTC_BASE + SIE, mask); /* ack level irqs because they can't be acked during * ack function since the handle_level_irq function * acks the irq before calling the interrupt handler */ - if (irq_desc[irq].status & IRQ_LEVEL) + if (irq_to_desc(d->irq)->status & IRQ_LEVEL) out_be32(INTC_BASE + IAR, mask); } -static void intc_disable_or_mask(unsigned int irq) +static void intc_disable_or_mask(struct irq_data *d) { - pr_debug("disable: %d\n", irq); - out_be32(INTC_BASE + CIE, 1 << irq); + pr_debug("disable: %d\n", d->irq); + out_be32(INTC_BASE + CIE, 1 << 
d->irq); } -static void intc_ack(unsigned int irq) +static void intc_ack(struct irq_data *d) { - pr_debug("ack: %d\n", irq); - out_be32(INTC_BASE + IAR, 1 << irq); + pr_debug("ack: %d\n", d->irq); + out_be32(INTC_BASE + IAR, 1 << d->irq); } -static void intc_mask_ack(unsigned int irq) +static void intc_mask_ack(struct irq_data *d) { - unsigned long mask = 1 << irq; - pr_debug("disable_and_ack: %d\n", irq); + unsigned long mask = 1 << d->irq; + pr_debug("disable_and_ack: %d\n", d->irq); out_be32(INTC_BASE + CIE, mask); out_be32(INTC_BASE + IAR, mask); } -static void intc_end(unsigned int irq) -{ - unsigned long mask = 1 << irq; - pr_debug("end: %d\n", irq); - if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { - out_be32(INTC_BASE + SIE, mask); - /* ack level sensitive intr */ - if (irq_desc[irq].status & IRQ_LEVEL) - out_be32(INTC_BASE + IAR, mask); - } -} - static struct irq_chip intc_dev = { .name = "Xilinx INTC", - .unmask = intc_enable_or_unmask, - .mask = intc_disable_or_mask, - .ack = intc_ack, - .mask_ack = intc_mask_ack, - .end = intc_end, + .irq_unmask = intc_enable_or_unmask, + .irq_mask = intc_disable_or_mask, + .irq_ack = intc_ack, + .irq_mask_ack = intc_mask_ack, }; unsigned int get_irq(struct pt_regs *regs) @@ -172,11 +159,11 @@ void __init init_IRQ(void) if (intr_type & (0x00000001 << i)) { set_irq_chip_and_handler_name(i, &intc_dev, handle_edge_irq, intc_dev.name); - irq_desc[i].status &= ~IRQ_LEVEL; + irq_clear_status_flags(i, IRQ_LEVEL); } else { set_irq_chip_and_handler_name(i, &intc_dev, handle_level_irq, intc_dev.name); - irq_desc[i].status |= IRQ_LEVEL; + irq_set_status_flags(i, IRQ_LEVEL); } } } diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index a9345fb4906a..098822413729 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -50,6 +50,7 @@ next_irq: int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; + struct irq_desc *desc; struct irqaction *action; unsigned long flags; @@ -61,8 +62,9 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; + desc = irq_to_desc(i); + raw_spin_lock_irqsave(&desc->lock, flags); + action = desc->action; if (!action) goto skip; seq_printf(p, "%3d: ", i); @@ -72,9 +74,9 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif - seq_printf(p, " %8s", irq_desc[i].status & + seq_printf(p, " %8s", desc->status & IRQ_LEVEL ? 
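The intc.c hunks above are the generic irqchip conversion: the mask/unmask/ack callbacks now receive a struct irq_data pointer (recovering the number from d->irq), they are wired up through the .irq_* fields of struct irq_chip, the obsolete .end callback disappears, and the IRQ_LEVEL bookkeeping moves to irq_set_status_flags()/irq_clear_status_flags(). A condensed sketch of the resulting shape; my_base, MY_SIE, MY_CIE and the my_* names are illustrative placeholders, not part of the Xilinx INTC driver, and writel() stands in for the driver's out_be32() accessor.

#include <linux/irq.h>
#include <linux/io.h>

static void __iomem *my_base;	/* assumed to be mapped during probe */
#define MY_SIE	0x10		/* illustrative "set enable" register */
#define MY_CIE	0x14		/* illustrative "clear enable" register */

static void my_mask(struct irq_data *d)
{
	writel(1 << d->irq, my_base + MY_CIE);	/* disable this source */
}

static void my_unmask(struct irq_data *d)
{
	writel(1 << d->irq, my_base + MY_SIE);	/* enable this source */
}

static struct irq_chip my_chip = {
	.name		= "example-intc",
	.irq_mask	= my_mask,
	.irq_unmask	= my_unmask,
};

static void my_setup_irq(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &my_chip, handle_level_irq,
				      my_chip.name);
	irq_set_status_flags(irq, IRQ_LEVEL);
}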
"level" : "edge"); - seq_printf(p, " %8s", irq_desc[i].chip->name); + seq_printf(p, " %8s", desc->irq_data.chip->name); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) @@ -82,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; } diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 5cb034174005..49faeb429599 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -24,6 +24,7 @@ extern char *_ebss; EXPORT_SYMBOL_GPL(_ebss); + #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); EXPORT_SYMBOL(_mcount); @@ -45,3 +46,14 @@ EXPORT_SYMBOL(empty_zero_page); #endif EXPORT_SYMBOL(mbc); + +extern void __divsi3(void); +EXPORT_SYMBOL(__divsi3); +extern void __modsi3(void); +EXPORT_SYMBOL(__modsi3); +extern void __mulsi3(void); +EXPORT_SYMBOL(__mulsi3); +extern void __udivsi3(void); +EXPORT_SYMBOL(__udivsi3); +extern void __umodsi3(void); +EXPORT_SYMBOL(__umodsi3); diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index ba7c4b16ed35..968648a81c1e 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -159,7 +159,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, } /* FIXME STATE_SAVE_PT_OFFSET; */ - ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE; + ti->cpu_context.r1 = (unsigned long)childregs; /* we should consider the fact that childregs is a copy of the parent * regs which were saved immediately after entering the kernel state * before enabling VM. This MSR will be restored in switch_to and diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index bceaa5543e39..00ee90f08343 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -59,7 +59,7 @@ static int __init early_init_dt_scan_serial(unsigned long node, { unsigned long l; char *p; - int *addr; + const __be32 *addr; pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname); diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c index 9ae24f4b882b..47187cc2cf00 100644 --- a/arch/microblaze/kernel/prom_parse.c +++ b/arch/microblaze/kernel/prom_parse.c @@ -2,88 +2,11 @@ #include <linux/kernel.h> #include <linux/string.h> -#include <linux/pci_regs.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/etherdevice.h> #include <linux/of_address.h> #include <asm/prom.h> -#include <asm/pci-bridge.h> - -#ifdef CONFIG_PCI -int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) -{ - struct device_node *dn, *ppnode; - struct pci_dev *ppdev; - u32 lspec; - u32 laddr[3]; - u8 pin; - int rc; - - /* Check if we have a device node, if yes, fallback to standard OF - * parsing - */ - dn = pci_device_to_OF_node(pdev); - if (dn) - return of_irq_map_one(dn, 0, out_irq); - - /* Ok, we don't, time to have fun. Let's start by building up an - * interrupt spec. we assume #interrupt-cells is 1, which is standard - * for PCI. If you do different, then don't use that routine. 
- */ - rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); - if (rc != 0) - return rc; - /* No pin, exit */ - if (pin == 0) - return -ENODEV; - - /* Now we walk up the PCI tree */ - lspec = pin; - for (;;) { - /* Get the pci_dev of our parent */ - ppdev = pdev->bus->self; - - /* Ouch, it's a host bridge... */ - if (ppdev == NULL) { - struct pci_controller *host; - host = pci_bus_to_host(pdev->bus); - ppnode = host ? host->dn : NULL; - /* No node for host bridge ? give up */ - if (ppnode == NULL) - return -EINVAL; - } else - /* We found a P2P bridge, check if it has a node */ - ppnode = pci_device_to_OF_node(ppdev); - - /* Ok, we have found a parent with a device-node, hand over to - * the OF parsing code. - * We build a unit address from the linux device to be used for - * resolution. Note that we use the linux bus number which may - * not match your firmware bus numbering. - * Fortunately, in most cases, interrupt-map-mask doesn't - * include the bus number as part of the matching. - * You should still be careful about that though if you intend - * to rely on this function (you ship a firmware that doesn't - * create device nodes for all PCI devices). - */ - if (ppnode) - break; - - /* We can only get here if we hit a P2P bridge with no node, - * let's do standard swizzling and try again - */ - lspec = pci_swizzle_interrupt_pin(pdev, lspec); - pdev = ppdev; - } - - laddr[0] = (pdev->bus->number << 16) - | (pdev->devfn << 8); - laddr[1] = laddr[2] = 0; - return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); -} -EXPORT_SYMBOL_GPL(of_irq_map_pci); -#endif /* CONFIG_PCI */ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 05ac8cc975d5..6a8e0cc5c57d 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -39,6 +39,7 @@ #include <linux/uaccess.h> #include <asm/asm-offsets.h> #include <asm/cacheflush.h> +#include <asm/syscall.h> #include <asm/io.h> /* Returns the address where the register at REG_OFFS in P is stashed away. */ @@ -123,7 +124,7 @@ long arch_ptrace(struct task_struct *child, long request, rval = -EIO; if (rval == 0 && request == PTRACE_PEEKUSR) - rval = put_user(val, (unsigned long *)data); + rval = put_user(val, (unsigned long __user *)data); break; default: rval = ptrace_request(child, request, addr, data); diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 9312fbb37efd..8e2c09b7ff26 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -95,7 +95,8 @@ inline unsigned get_romfs_len(unsigned *addr) void __init machine_early_init(const char *cmdline, unsigned int ram, unsigned int fdt, unsigned int msr) { - unsigned long *src, *dst = (unsigned long *)0x0; + unsigned long *src, *dst; + unsigned int offset = 0; /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the * end of kernel. There are two position which we want to check. @@ -168,7 +169,14 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, "CPU have it %x\n", msr); #endif - for (src = __ivt_start; src < __ivt_end; src++, dst++) + /* Do not copy reset vectors. offset = 0x2 means skip the first + * two instructions. dst is pointer to MB vectors which are placed + * in block ram. 
If you want to copy reset vector setup offset to 0x0 */ +#if !CONFIG_MANUAL_RESET_VECTOR + offset = 0x2; +#endif + dst = (unsigned long *) (offset * sizeof(u32)); + for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) *dst = *src; /* Initialize global data */ diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index d8d3bb396cd6..599671168980 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -93,7 +93,7 @@ static int restore_sigcontext(struct pt_regs *regs, asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame = - (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE); + (struct rt_sigframe __user *)(regs->r1); sigset_t set; int rval; @@ -197,8 +197,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user((void *)current->sas_ss_sp, + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->r1), &frame->uc.uc_stack.ss_flags); @@ -247,7 +247,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, goto give_sigsegv; /* Set up registers for signal handler */ - regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE; + regs->r1 = (unsigned long) frame; /* Signal handler args: */ regs->r5 = signal; /* arg 0: signum */ diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 2250fe9d269b..e5b154f24f85 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -40,7 +40,8 @@ asmlinkage long microblaze_vfork(struct pt_regs *regs) regs, 0, NULL, NULL); } -asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs *regs) +asmlinkage long microblaze_clone(int flags, unsigned long stack, + struct pt_regs *regs) { if (!stack) stack = regs->r1; diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index a5aa33db1df3..d8a214f11ac2 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -38,8 +38,8 @@ static unsigned int timer_baseaddr; #define TIMER_BASE timer_baseaddr #endif -unsigned int freq_div_hz; -unsigned int timer_clock_freq; +static unsigned int freq_div_hz; +static unsigned int timer_clock_freq; #define TCSR0 (0x00) #define TLR0 (0x04) @@ -202,7 +202,7 @@ static struct cyclecounter microblaze_cc = { .shift = 8, }; -int __init init_microblaze_timecounter(void) +static int __init init_microblaze_timecounter(void) { microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, microblaze_cc.shift); diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index fefac5c33586..9781a528cfc9 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -183,7 +183,7 @@ static inline void unwind_trap(struct task_struct *task, unsigned long pc, * @trace : Where to store stack backtrace (PC values). 
* NULL == print backtrace to kernel log */ -void microblaze_unwind_inner(struct task_struct *task, +static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, struct stack_trace *trace) diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 3451bdec9f05..ac0e1a5d4782 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -70,11 +70,6 @@ SECTIONS { RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) _edata = . ; - /* Reserve some low RAM for r0 based memory references */ - . = ALIGN(0x4) ; - r0_ram = . ; - . = . + PAGE_SIZE; /* a page should be enough */ - /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ . = ALIGN(8); .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c index d4860e154d29..0585bccb7fad 100644 --- a/arch/microblaze/lib/muldi3.c +++ b/arch/microblaze/lib/muldi3.c @@ -58,3 +58,4 @@ DWtype __muldi3(DWtype u, DWtype v) return w.ll; } +EXPORT_SYMBOL(__muldi3); diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 5a59dad62bd2..a1e2e18e0961 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -59,7 +59,7 @@ * uncached region. This will no doubt cause big problems if memory allocated * here is not also freed properly. -- JW */ -void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) +void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) { unsigned long order, vaddr; void *ret; diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 57bd2a09610c..ae97d2ccdc22 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -48,7 +48,7 @@ static int store_updates_sp(struct pt_regs *regs) { unsigned int inst; - if (get_user(inst, (unsigned int *)regs->pc)) + if (get_user(inst, (unsigned int __user *)regs->pc)) return 0; /* check for 1 in the rD field */ if (((inst >> 21) & 0x1f) != 1) diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index e363615d6798..1e01a1253631 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_pci.h> #include <asm/processor.h> #include <asm/io.h> diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c index 3c3d808d7ce0..92728a6cfd80 100644 --- a/arch/microblaze/pci/pci_32.c +++ b/arch/microblaze/pci/pci_32.c @@ -332,6 +332,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose) hose->global_number); return; } + bus.dev->of_node = of_node_get(node); bus->secondary = hose->first_busno; hose->bus = bus; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f5ecc0566bc2..d88983516e26 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,6 +4,7 @@ config MIPS select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_OPROFILE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_ARCH_KGDB @@ -208,6 +209,7 @@ config MACH_JZ4740 select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK select HAVE_PWM + select HAVE_CLK config LASAT bool "LASAT Networks platforms" @@ -333,6 +335,8 @@ config PNX8550_STB810 config PMC_MSP bool "PMC-Sierra MSP chipsets" depends on EXPERIMENTAL + select CEVT_R4K + select CSRC_R4K select DMA_NONCOHERENT select SWAP_IO_SPACE select NO_EXCEPT_FILL diff --git 
a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 6398fa95905c..40b84b991191 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c @@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert); static void mtx1_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void mtx1_power_off(void) diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42add697..956f946218c5 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c @@ -28,6 +28,8 @@ #include <linux/mtd/physmap.h> #include <mtd/mtd-abi.h> +#include <asm/mach-au1x00/au1xxx_eth.h> + static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { &mtx1_mtd, }; +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { + .phy_search_highest_addr = 1, + .phy1_search_mac0 = 1, +}; + static int __init mtx1_register_devices(void) { int rc; + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); + rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index b43c918925d3..80c521e5290d 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c @@ -36,8 +36,8 @@ static void xxs1500_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void xxs1500_power_off(void) diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index b9cce90346cf..6ebf1734b411 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -75,7 +75,7 @@ } static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int retval; + int ret = 0; + u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; if (cpu_has_llsc && R10000_LLSC_WAR) { @@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqzl $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { @@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqz $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else return -ENOSYS; - return retval; + *uval = val; + return ret; } #endif diff --git a/arch/mips/include/asm/ioctls.h b/arch/mips/include/asm/ioctls.h index d967b8997626..92403c3d6007 100644 --- a/arch/mips/include/asm/ioctls.h +++ b/arch/mips/include/asm/ioctls.h @@ -85,6 +85,7 @@ #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 /* I hope the range from 0x5480 on is free ... */ #define TIOCSCTTY 0x5480 /* become controlling tty */ diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h index e00007cf8162..d0c77496c728 100644 --- a/arch/mips/include/asm/perf_event.h +++ b/arch/mips/include/asm/perf_event.h @@ -11,15 +11,5 @@ #ifndef __MIPS_PERF_EVENT_H__ #define __MIPS_PERF_EVENT_H__ - -/* - * MIPS performance counters do not raise NMI upon overflow, a regular - * interrupt will be signaled. Hence we can do the pending perf event - * work at the tail of the irq handler. - */ -static inline void -set_perf_event_pending(void) -{ -} - +/* Leave it empty here. 
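The futex hunks above (shown here for MIPS, with the same change applied to the other architectures in this series) switch futex_atomic_cmpxchg_inatomic() from returning the old value to returning 0 or -EFAULT, handing the value actually read back through a separate *uval argument. A hedged caller-side sketch of the new contract; example_cmpxchg() and its -EAGAIN retry signal are illustrative, not the generic futex code.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int example_cmpxchg(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 curval;
	int err;

	err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);
	if (err)
		return err;	/* -EFAULT: the user access faulted */
	if (curval != expected)
		return -EAGAIN;	/* another task changed the futex word */
	return 0;		/* the exchange took place */
}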
The file is required by linux/perf_event.h */ #endif /* __MIPS_PERF_EVENT_H__ */ diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -17,29 +17,13 @@ #include <asm/cacheflush.h> #include <asm/uasm.h> -/* - * If the Instruction Pointer is in module space (0xc0000000), return true; - * otherwise, it is in kernel space (0x80000000), return false. - * - * FIXME: This will not work when the kernel space and module space are the - * same. If they are the same, we need to modify scripts/recordmcount.pl, - * ftrace_make_nop/call() and the other related parts to ensure the - * enabling/disabling of the calling site to _mcount is right for both kernel - * and module. - */ - -static inline int in_module(unsigned long ip) -{ - return ip & 0x40000000; -} +#include <asm-generic/sections.h> #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ -#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) @@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } +/* + * The details about the calling site of mcount on MIPS + * + * 1. For kernel: + * + * move at, ra + * jal _mcount --> nop + * + * 2. For modules: + * + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + * 2.2 For the Other situations + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * jalr v1 + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions + */ + +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { @@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; /* - * We have compiled module with -mlong-calls, but compiled the kernel - * without it, we need to cope with them respectively. + * If ip is in kernel space, no long call, otherwise, long call is + * needed. 
*/ - if (in_module(ip)) { -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * move $12, ra_address - * jalr v1 - * sub sp, sp, 8 - * 1: offset = 5 instructions - */ - new = INSN_B_1F_5; -#else - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * jalr v1 - * nop | move $12, ra_address | sub sp, sp, 8 - * 1: offset = 4 instructions - */ - new = INSN_B_1F_4; -#endif - } else { - /* - * move at, ra - * jal _mcount --> nop - */ - new = INSN_NOP; - } + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + return ftrace_modify_code(ip, new); } @@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - /* ip, module: 0xc0000000, kernel: 0x80000000 */ - new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; + new = in_kernel_space(ip) ? insn_jal_ftrace_caller : + insn_lui_v1_hi16_mcount; return ftrace_modify_code(ip, new); } @@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_addr(unsigned long self_addr, - unsigned long parent, - unsigned long parent_addr, - unsigned long fp) +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { - unsigned long sp, ip, ra; + unsigned long sp, ip, tmp; unsigned int code; int faulted; /* - * For module, move the ip from calling site of mcount to the - * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for - * kernel, move to the instruction "move ra, at"(offset is 12) + * For module, move the ip from the return address after the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for + * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_module(self_addr) ? 20 : 12); + ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} * ra, offset(sp)" instruction */ do { - ip -= 4; - /* get the code at "ip": code = *(unsigned int *)ip; */ safe_load_code(code, ip, faulted); @@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * store the ra on the stack */ if ((code & S_R_SP) != S_R_SP) - return parent_addr; + return parent_ra_addr; - } while (((code & S_RA_SP) != S_RA_SP)); + /* Move to the next instruction */ + ip -= 4; + } while ((code & S_RA_SP) != S_RA_SP); sp = fp + (code & OFFSET_MASK); - /* ra = *(unsigned long *)sp; */ - safe_load_stack(ra, sp, faulted); + /* tmp = *(unsigned long *)sp; */ + safe_load_stack(tmp, sp, faulted); if (unlikely(faulted)) return 0; - if (ra == parent) + if (tmp == old_parent_ra) return sp; return 0; } @@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * Hook the return address and push it in the stack of return addrs * in current thread info. 
*/ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { - unsigned long old; + unsigned long old_parent_ra; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; - int faulted; + int faulted, insns; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; /* - * "parent" is the stack address saved the return address of the caller - * of _mcount. + * "parent_ra_addr" is the stack address saved the return address of + * the caller of _mcount. * * if the gcc < 4.5, a leaf function does not save the return address * in the stack address, so, we "emulate" one in _mcount's stack space, @@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, * do it in ftrace_graph_caller of mcount.S. */ - /* old = *parent; */ - safe_load_stack(old, parent, faulted); + /* old_parent_ra = *parent_ra_addr; */ + safe_load_stack(old_parent_ra, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS - parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, - (unsigned long)parent, fp); + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, + old_parent_ra, (unsigned long)parent_ra_addr, fp); /* * If fails when getting the stack address of the non-leaf function's * ra, stop function graph tracer and return */ - if (parent == 0) + if (parent_ra_addr == 0) goto out; #endif - /* *parent = return_hooker; */ - safe_store_stack(return_hooker, parent, faulted); + /* *parent_ra_addr = return_hooker; */ + safe_store_stack(return_hooker, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == - -EBUSY) { - *parent = old; + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) + == -EBUSY) { + *parent_ra_addr = old_parent_ra; return; } - trace.func = self_addr; + /* + * Get the recorded ip of the current mcount calling site in the + * __mcount_loc section, which will be used to filter the function + * entries configured through the tracing/set_graph_function interface. + */ + + insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; - *parent = old; + *parent_ra_addr = old_parent_ra; } return; out: diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, return ret; } -static int mipspmu_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* To look for a free counter for this event. */ - idx = mipspmu->alloc_counter(cpuc, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then - * make sure it is disabled. - */ - event->hw.idx = idx; - mipspmu->disable_event(idx); - cpuc->events[idx] = event; - - /* Set the period for the event. */ - mipspmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - mipspmu->enable_event(hwc, idx); - - /* Propagate our changes to the userspace mapping. 
*/ - perf_event_update_userpage(event); - -out: - return err; -} - static void mipspmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) @@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, unsigned long flags; int shift = 64 - TOTAL_BITS; s64 prev_raw_count, new_raw_count; - s64 delta; + u64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); @@ -231,32 +196,90 @@ again: return; } -static void mipspmu_disable(struct perf_event *event) +static void mipspmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + mipspmu->enable_event(hwc, hwc->idx); +} + +static void mipspmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + mipspmu->disable_event(hwc->idx); + barrier(); + mipspmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int mipspmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int idx; + int err = 0; + perf_pmu_disable(event->pmu); - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + /* To look for a free counter for this event. */ + idx = mipspmu->alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } - /* We are working on a local event. */ + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; mipspmu->disable_event(idx); + cpuc->events[idx] = event; - barrier(); - - mipspmu_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + mipspmu_start(event, PERF_EF_RELOAD); + /* Propagate our changes to the userspace mapping. 
*/ perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; } -static void mipspmu_unthrottle(struct perf_event *event) +static void mipspmu_del(struct perf_event *event, int flags) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; - mipspmu->enable_event(hwc, hwc->idx); + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + mipspmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); } static void mipspmu_read(struct perf_event *event) @@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) mipspmu_event_update(event, hwc, hwc->idx); } -static struct pmu pmu = { - .enable = mipspmu_enable, - .disable = mipspmu_disable, - .unthrottle = mipspmu_unthrottle, - .read = mipspmu_read, -}; +static void mipspmu_enable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->start(); +} + +static void mipspmu_disable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->stop(); +} static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmu_reserve_mutex); @@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) perf_irq = save_perf_irq; } +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. + */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu->num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int mipspmu_event_init(struct perf_event *event) +{ + int err = 0; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (!mipspmu || event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { + atomic_dec(&active_events); + return -ENOSPC; + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static struct pmu pmu = { + .pmu_enable = mipspmu_enable, + .pmu_disable = mipspmu_disable, + .event_init = mipspmu_event_init, + .add = mipspmu_add, + .del = mipspmu_del, + .start = mipspmu_start, + .stop = mipspmu_stop, + .read = mipspmu_read, +}; + static inline unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) { @@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, { struct hw_perf_event fake_hwc = event->hw; - if (event->pmu && event->pmu != &pmu) - return 0; + /* Allow mixed event group. So return 1 to pass validation. 
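The perf_event.c rework above retires the old hw_perf_event_init()/hw_perf_enable()/hw_perf_disable() entry points in favour of a struct pmu whose callbacks the core calls directly, and which is later registered with perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW). A stripped-down sketch of that callback set; the my_* bodies are empty placeholders standing in for the MIPS-specific logic.

#include <linux/errno.h>
#include <linux/perf_event.h>

static void my_pmu_enable(struct pmu *pmu) { /* start all counters */ }
static void my_pmu_disable(struct pmu *pmu) { /* stop all counters */ }

static int my_event_init(struct perf_event *event)
{
	return -ENOENT;		/* decline events this PMU does not handle */
}

static int my_add(struct perf_event *event, int flags)
{
	return 0;		/* claim a counter; start it if PERF_EF_START */
}

static void my_del(struct perf_event *event, int flags) { }
static void my_start(struct perf_event *event, int flags) { }
static void my_stop(struct perf_event *event, int flags) { }
static void my_read(struct perf_event *event) { }

static struct pmu my_pmu = {
	.pmu_enable	= my_pmu_enable,
	.pmu_disable	= my_pmu_disable,
	.event_init	= my_event_init,
	.add		= my_add,
	.del		= my_del,
	.start		= my_start,
	.stop		= my_stop,
	.read		= my_read,
};

/* Registration, done once the hardware counters are probed:
 *	perf_pmu_register(&my_pmu, "cpu", PERF_TYPE_RAW);
 */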
*/ + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; } @@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) return 0; } -/* - * mipsxx/rm9000/loongson2 have different performance counters, they have - * specific low-level init routines. - */ -static void reset_counters(void *arg); -static int __hw_perf_event_init(struct perf_event *event); - -static void hw_perf_event_destroy(struct perf_event *event) -{ - if (atomic_dec_and_mutex_lock(&active_events, - &pmu_reserve_mutex)) { - /* - * We must not call the destroy function with interrupts - * disabled. - */ - on_each_cpu(reset_counters, - (void *)(long)mipspmu->num_counters, 1); - mipspmu_free_irq(); - mutex_unlock(&pmu_reserve_mutex); - } -} - -const struct pmu *hw_perf_event_init(struct perf_event *event) -{ - int err = 0; - - if (!mipspmu || event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return ERR_PTR(-ENODEV); - - if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { - atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); - } - - mutex_lock(&pmu_reserve_mutex); - if (atomic_read(&active_events) == 0) - err = mipspmu_get_irq(); - - if (!err) - atomic_inc(&active_events); - mutex_unlock(&pmu_reserve_mutex); - } - - if (err) - return ERR_PTR(err); - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err ? ERR_PTR(err) : &pmu; -} - -void hw_perf_enable(void) -{ - if (mipspmu) - mipspmu->start(); -} - -void hw_perf_disable(void) -{ - if (mipspmu) - mipspmu->stop(); -} - /* This is needed by specific irq handlers in perf_event_*.c */ static void handle_associated_event(struct cpu_hw_events *cpuc, @@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, #include "perf_event_mipsxx.c" /* Callchain handling code. */ -static inline void -callchain_store(struct perf_callchain_entry *entry, - u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} /* * Leave userspace callchain empty for now. When we find a way to trace * the user stack callchains, we add here. 
*/ -static void -perf_callchain_user(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) { } @@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { - callchain_store(entry, addr); + perf_callchain_store(entry, addr); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; } } } -static void -perf_callchain_kernel(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; - callchain_store(entry, PERF_CONTEXT_KERNEL); if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(current); @@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, return; } do { - callchain_store(entry, pc); + perf_callchain_store(entry, pc); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else - callchain_store(entry, PERF_CONTEXT_KERNEL); save_raw_perf_callchain(entry, sp); #endif } - -static void -perf_do_callchain(struct pt_regs *regs, - struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || !current->pid) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) { - perf_callchain_kernel(regs, entry); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - if (regs) - perf_callchain_user(regs, entry); -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); - -struct perf_callchain_entry * -perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - perf_do_callchain(regs, entry); - return entry; -} diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..d9a7db78ed62 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) * interrupt, not NMI. */ if (handled == IRQ_HANDLED) - perf_event_do_pending(); + irq_work_run(); #ifdef CONFIG_MIPS_MT_SMP read_unlock(&pmuint_rwlock); @@ -1045,6 +1045,8 @@ init_hw_perf_events(void) "CPU, irq %d%s\n", mipspmu->name, counters, irq, irq < 0 ? 
" (share with timer interrupt)" : ""); + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + return 0; } early_initcall(init_hw_perf_events); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) static int protected_restore_fp_context(struct sigcontext __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) static int protected_restore_fp_context32(struct sigcontext32 __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) */ static struct task_struct *cpu_idle_thread[NR_CPUS]; +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + int __cpuinit __cpu_up(unsigned int cpu) { struct task_struct *idle; @@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) * Linux can schedule processes on this slave. */ if (!cpu_idle_thread[cpu]) { - idle = fork_idle(cpu); - cpu_idle_thread[cpu] = idle; + /* + * Schedule work item to avoid forking user task + * Ported from arch/x86/kernel/smpboot.c + */ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + idle = cpu_idle_thread[cpu] = c_idle.idle; if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -383,12 +383,11 @@ save_static_function(sys_sysmips); static int __used noinline _sys_sysmips(nabi_no_regargs struct pt_regs regs) { - long cmd, arg1, arg2, arg3; + long cmd, arg1, arg2; cmd = regs.regs[4]; arg1 = regs.regs[5]; arg2 = regs.regs[6]; - arg3 = regs.regs[7]; switch (cmd) { case MIPS_ATOMIC_SET: @@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) if (arg1 & 2) set_thread_flag(TIF_LOGADE); else - clear_thread_flag(TIF_FIXADE); + clear_thread_flag(TIF_LOGADE); return 0; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 570607b376b5..832afbb87588 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -115,7 +115,7 @@ SECTIONS EXIT_DATA } - PERCPU(PAGE_SIZE) + PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE) . 
= ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..ab52b7cf3b6b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -148,9 +148,9 @@ struct { spinlock_t tc_list_lock; struct list_head tc_list; /* Thread contexts */ } vpecontrol = { - .vpe_list_lock = SPIN_LOCK_UNLOCKED, + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), - .tc_list_lock = SPIN_LOCK_UNLOCKED, + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 6e1b77fec7ea..aca93eed8779 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -1,6 +1,7 @@ +if MACH_LOONGSON + choice prompt "Machine Type" - depends on MACH_LOONGSON config LEMOTE_FULOONG2E bool "Lemote Fuloong(2e) mini-PC" @@ -87,3 +88,5 @@ config LOONGSON_UART_BASE config LOONGSON_MC146818 bool default n + +endif # MACH_LOONGSON diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c index 1a06defc4f7f..353e1d2e41a5 100644 --- a/arch/mips/loongson/common/cmdline.c +++ b/arch/mips/loongson/common/cmdline.c @@ -44,10 +44,5 @@ void __init prom_init_cmdline(void) strcat(arcs_cmdline, " "); } - if ((strstr(arcs_cmdline, "console=")) == NULL) - strcat(arcs_cmdline, " console=ttyS0,115200"); - if ((strstr(arcs_cmdline, "root=")) == NULL) - strcat(arcs_cmdline, " root=/dev/hda1"); - prom_init_machtype(); } diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 81fbe6b73f91..2efd5d9dee27 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c @@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void) void __init prom_init_machtype(void) { - char *p, str[MACHTYPE_LEN]; + char *p, str[MACHTYPE_LEN + 1]; int machtype = MACH_LEMOTE_FL2E; mips_machtype = LOONGSON_MACHTYPE; @@ -53,6 +53,7 @@ void __init prom_init_machtype(void) } p += strlen("machtype="); strncpy(str, p, MACHTYPE_LEN); + str[MACHTYPE_LEN] = '\0'; p = strstr(str, " "); if (p) *p = '\0'; diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 2701d9500959..2a7d43f4f161 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h @@ -70,7 +70,7 @@ #define COMPXSP \ - unsigned xm; int xe; int xs; int xc + unsigned xm; int xe; int xs __maybe_unused; int xc #define COMPYSP \ unsigned ym; int ye; int ys; int yc @@ -104,7 +104,7 @@ #define COMPXDP \ -u64 xm; int xe; int xs; int xc +u64 xm; int xe; int xs __maybe_unused; int xc #define COMPYDP \ u64 ym; int ye; int ys; int yc diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 2efcbd24c82f..279599e9a779 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr) void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; - unsigned long lastpfn; + unsigned long lastpfn __maybe_unused; pagetable_init(); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 083d3412d0bc..04f9e17db9d0 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -109,6 +109,8 @@ static bool scratchpad_available(void) static int scratchpad_offset(int i) { BUG(); + /* Really unreachable, but evidently some GCC want this. 
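The machtype.c hunk above is the standard strncpy() termination fix: when the source string is at least MACHTYPE_LEN characters long, strncpy() copies no trailing NUL, so the destination must be one byte larger and terminated explicitly. A small stand-alone C sketch of the pattern; NAME_LEN and parse_name() are illustrative.

#include <string.h>

#define NAME_LEN 32			/* illustrative length limit */

static void parse_name(const char *src)
{
	char str[NAME_LEN + 1];		/* one extra byte for the forced NUL */

	strncpy(str, src, NAME_LEN);	/* may stop without a terminator */
	str[NAME_LEN] = '\0';		/* so always terminate by hand */
	/* ... parse str ... */
}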
*/ + return 0; } #endif /* diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index b7c03d80c88c..68798f869c0f 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c @@ -308,7 +308,7 @@ static struct resource pci_mem_resource = { * RETURNS: PCIBIOS_SUCCESSFUL - success * ****************************************************************************/ -static int bpci_interrupt(int irq, void *dev_id) +static irqreturn_t bpci_interrupt(int irq, void *dev_id) { struct msp_pci_regs *preg = (void *)PCI_BASE_REG; unsigned int stat = preg->if_status; @@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id) /* write to clear all asserted interrupts */ preg->if_status = stat; - return PCIBIOS_SUCCESSFUL; + return IRQ_HANDLED; } /***************************************************************************** diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index c139988bb85d..8d798497c614 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig @@ -4,15 +4,11 @@ choice config PMC_MSP4200_EVAL bool "PMC-Sierra MSP4200 Eval Board" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI config PMC_MSP4200_GW bool "PMC-Sierra MSP4200 VoIP Gateway" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index cca64e15f57f..01df84ce31e2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c @@ -81,7 +81,7 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_rate/2; } -unsigned int __init get_c0_compare_int(void) +unsigned int __cpuinit get_c0_compare_int(void) { return MSP_INT_VPE0_TIMER; } diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 92d2f9298e38..9d773a639513 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m, * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (ACCESS_ONCE((v)->counter)) /** * atomic_set - set atomic variable diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 679dee0bbd08..3d6e60dad9d9 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_check(x, ptr, size) \ ({ \ + const __typeof__(ptr) __guc_ptr = (ptr); \ int _e; \ - if (likely(__access_ok((unsigned long) (ptr), (size)))) \ - _e = __get_user_nocheck((x), (ptr), (size)); \ + if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ + _e = __get_user_nocheck((x), __guc_ptr, (size)); \ else { \ _e = -EFAULT; \ (x) = (__typeof__(x))0; \ diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 75da468090b9..5b955000626d 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -104,8 +104,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) unsigned tsc, elapse; irqreturn_t ret; - write_seqlock(&xtime_lock); - while (tsc = get_cycles(), elapse = tsc - mn10300_last_tsc, /* time elapsed since last * tick */ @@ -114,11 +112,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) mn10300_last_tsc += MN10300_TSC_PER_HZ; /* advance the kernel's time tracking system */ - do_timer(1); + xtime_update(1); } - write_sequnlock(&xtime_lock); - ret = local_timer_interrupt(); #ifdef CONFIG_SMP send_IPI_allbutself(LOCAL_TIMER_IPI); diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index febbeee7f2f5..968bcd2cb022 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -70,7 +70,7 @@ SECTIONS .exit.text : { EXIT_TEXT; } .exit.data : { EXIT_DATA; } - PERCPU(PAGE_SIZE) + PERCPU(32, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c index a8933a60b2d4..a6b63dde603d 100644 --- a/arch/mn10300/mm/cache-inv-icache.c +++ b/arch/mn10300/mm/cache-inv-icache.c @@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end) /* invalidate the icache coverage on that region */ mn10300_local_icache_inv_range2(addr + off, size); - smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); } /** @@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end) * directly */ start_page = (start >= 0x80000000UL) ? 
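/*
 * Illustrative sketch, not from the patch: the mn10300 (above) and parisc
 * (below) timer hunks replace the open-coded
 * write_seqlock(&xtime_lock); do_timer(n); write_sequnlock(&xtime_lock)
 * sequence with xtime_update(n), which takes xtime_lock internally.  Assumed
 * simple one-tick-per-interrupt timer:
 */
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <asm/irq_regs.h>

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	xtime_update(1);	/* advances jiffies/wall time; locking happens inside */

	/* per-CPU accounting stays in the handler, as before */
	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}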
start : 0x80000000UL; mn10300_icache_inv_range(start_page, end); - smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); if (start_page == start) goto done; end = start_page; diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index 30394081d9b6..6ab9580b0b00 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c @@ -185,26 +185,21 @@ struct hpux_statfs { int16_t f_pad; }; -static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf) +static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p) { - struct kstatfs st; - int retval; - - retval = vfs_statfs(path, &st); - if (retval) - return retval; - - memset(buf, 0, sizeof(*buf)); - buf->f_type = st.f_type; - buf->f_bsize = st.f_bsize; - buf->f_blocks = st.f_blocks; - buf->f_bfree = st.f_bfree; - buf->f_bavail = st.f_bavail; - buf->f_files = st.f_files; - buf->f_ffree = st.f_ffree; - buf->f_fsid[0] = st.f_fsid.val[0]; - buf->f_fsid[1] = st.f_fsid.val[1]; - + struct hpux_statfs buf; + memset(&buf, 0, sizeof(buf)); + buf.f_type = st->f_type; + buf.f_bsize = st->f_bsize; + buf.f_blocks = st->f_blocks; + buf.f_bfree = st->f_bfree; + buf.f_bavail = st->f_bavail; + buf.f_files = st->f_files; + buf.f_ffree = st->f_ffree; + buf.f_fsid[0] = st->f_fsid.val[0]; + buf.f_fsid[1] = st->f_fsid.val[1]; + if (copy_to_user(p, &buf, sizeof(buf))) + return -EFAULT; return 0; } @@ -212,35 +207,19 @@ static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf) asmlinkage long hpux_statfs(const char __user *pathname, struct hpux_statfs __user *buf) { - struct path path; - int error; - - error = user_path(pathname, &path); - if (!error) { - struct hpux_statfs tmp; - error = do_statfs_hpux(&path, &tmp); - if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) - error = -EFAULT; - path_put(&path); - } + struct kstatfs st; + int error = user_statfs(pathname, &st); + if (!error) + error = do_statfs_hpux(&st, buf); return error; } asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf) { - struct file *file; - struct hpux_statfs tmp; - int error; - - error = -EBADF; - file = fget(fd); - if (!file) - goto out; - error = do_statfs_hpux(&file->f_path, &tmp); - if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) - error = -EFAULT; - fput(file); - out: + struct kstatfs st; + int error = fd_statfs(fd, &st); + if (!error) + error = do_statfs_hpux(&st, buf); return error; } diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h index f357fc693c89..0304b92ccfea 100644 --- a/arch/parisc/include/asm/fcntl.h +++ b/arch/parisc/include/asm/fcntl.h @@ -19,6 +19,8 @@ #define O_NOFOLLOW 000000200 /* don't follow links */ #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ +#define O_PATH 020000000 + #define F_GETLK64 8 #define F_SETLK64 9 #define F_SETLKW64 10 diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 0c705c3a55ef..67a33cc27ef2 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -8,7 +8,7 @@ #include <asm/errno.h> static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
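/*
 * Illustrative sketch, not from the patch: the sys_hpux.c rewrite above uses
 * the new user_statfs()/fd_statfs() helpers, which do the path/fd resolution
 * and the vfs_statfs() call and hand back a plain struct kstatfs; the arch
 * code only converts that into its ABI struct and copies it out.  The ABI
 * struct below is hypothetical and keeps a reduced field set:
 */
#include <linux/errno.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_statfs {				/* hypothetical userspace layout */
	long f_type;
	long f_bsize;
	long f_blocks;
};

static long example_sys_statfs(const char __user *pathname,
			       struct example_statfs __user *ubuf)
{
	struct kstatfs st;
	struct example_statfs out;
	int error = user_statfs(pathname, &st);	/* lookup + vfs_statfs() in one call */

	if (error)
		return error;

	memset(&out, 0, sizeof(out));		/* don't leak uninitialised padding */
	out.f_type   = st.f_type;
	out.f_bsize  = st.f_bsize;
	out.f_blocks = st.f_blocks;

	return copy_to_user(ubuf, &out, sizeof(out)) ? -EFAULT : 0;
}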
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) /* Non-atomic version */ static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int err = 0; - int uval; + u32 val; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) return -EFAULT; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - err = get_user(uval, uaddr); - if (err) return -EFAULT; - if (uval == oldval) - err = put_user(newval, uaddr); - if (err) return -EFAULT; - return uval; + if (get_user(val, uaddr)) + return -EFAULT; + if (val == oldval && put_user(newval, uaddr)) + return -EFAULT; + *uval = val; + return 0; } #endif /*__KERNEL__*/ diff --git a/arch/parisc/include/asm/ioctls.h b/arch/parisc/include/asm/ioctls.h index 6ba80d03623a..054ec06f9e23 100644 --- a/arch/parisc/include/asm/ioctls.h +++ b/arch/parisc/include/asm/ioctls.h @@ -54,6 +54,7 @@ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ #define FIOCLEX 0x5451 diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 05511ccb61d2..45b7389d77aa 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -162,11 +162,8 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) update_process_times(user_mode(get_irq_regs())); } - if (cpu == 0) { - write_seqlock(&xtime_lock); - do_timer(ticks_elapsed); - write_sequnlock(&xtime_lock); - } + if (cpu == 0) + xtime_update(ticks_elapsed); return IRQ_HANDLED; } diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index d64a6bbec2aa..8f1e4efd143e 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -145,7 +145,7 @@ SECTIONS EXIT_DATA } - PERCPU(PAGE_SIZE) + PERCPU(L1_CACHE_BYTES, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 7c589ef81fb0..c94e4a3fe2ef 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -30,7 +30,7 @@ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "cr0", "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
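/*
 * Illustrative sketch, not from the patch: every futex_atomic_cmpxchg_inatomic()
 * conversion in this series (parisc above, powerpc and s390 below) moves to
 * the same convention -- return 0 or -EFAULT, and report the value found at
 * uaddr through the new u32 *uval out-parameter instead of the return value.
 * Simplified caller, assumed to run with pagefaults disabled as futex.c does:
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int example_futex_cas(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 curval;
	int ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);

	if (ret)			/* -EFAULT: user page not accessible */
		return ret;
	if (curval != expected)		/* lost the race; callers typically retry */
		return -EAGAIN;
	return 0;			/* exchange took place */
}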
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev; + int ret = 0; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ - cmpw 0,%0,%3\n\ +"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%1,%4\n\ bne- 3f\n" - PPC405_ERR77(0,%2) -"2: stwcx. %4,0,%2\n\ + PPC405_ERR77(0,%3) +"2: stwcx. %5,0,%3\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER "3: .section .fixup,\"ax\"\n\ -4: li %0,%5\n\ +4: li %0,%6\n\ b 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 3\n\ " PPC_LONG "1b,4b,2b,4b\n\ .previous" \ - : "=&r" (prev), "+m" (*uaddr) + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h index c7dc17cf84f1..e9b78870aaab 100644 --- a/arch/powerpc/include/asm/ioctls.h +++ b/arch/powerpc/include/asm/ioctls.h @@ -96,6 +96,7 @@ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 380d48bacd16..26b8c807f8f1 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -33,9 +33,25 @@ // //---------------------------------------------------------------------------- #include <linux/cache.h> +#include <linux/threads.h> #include <asm/types.h> #include <asm/mmu.h> +/* + * We only have to have statically allocated lppaca structs on + * legacy iSeries, which supports at most 64 cpus. + */ +#ifdef CONFIG_PPC_ISERIES +#if NR_CPUS < 64 +#define NR_LPPACAS NR_CPUS +#else +#define NR_LPPACAS 64 +#endif +#else /* not iSeries */ +#define NR_LPPACAS 1 +#endif + + /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k * alignment is sufficient to prevent this */ struct lppaca { diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 51e9e6f90d12..5e156e034fe2 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -164,13 +164,23 @@ extern void setup_indirect_pci(struct pci_controller* hose, resource_size_t cfg_addr, resource_size_t cfg_data, u32 flags); -#ifndef CONFIG_PPC64 - static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) { return bus->sysdata; } +#ifndef CONFIG_PPC64 + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + struct pci_controller *host; + + if (bus->self) + return pci_device_to_OF_node(bus->self); + host = pci_bus_to_host(bus); + return host ? 
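/*
 * Illustrative sketch, not from the patch: after the pci-bridge.h change
 * above, pci_bus_to_host() is the same sysdata load on ppc32 and ppc64,
 * because pcibios_scan_phb() (further down in this series) now passes the
 * pci_controller itself as sysdata to pci_create_bus().  Hypothetical helper
 * resolving the PHB device node for any PCI device:
 */
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static struct device_node *example_phb_node(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);

	return hose ? hose->dn : NULL;	/* NULL if no host bridge is recorded for the bus */
}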
host->dn : NULL; +} + static inline int isa_vaddr_is_ioport(void __iomem *address) { /* No specific ISA handling on ppc32 at this stage, it @@ -218,19 +228,10 @@ extern void * update_dn_pci_info(struct device_node *dn, void *data); /* Get a device_node from a pci_dev. This code must be fast except * in the case where the sysdata is incorrect and needs to be fixed - * up (this will only happen once). - * In this case the sysdata will have been inherited from a PCI host - * bridge or a PCI-PCI bridge further up the tree, so it will point - * to a valid struct pci_dn, just not the one we want. - */ + * up (this will only happen once). */ static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) { - struct device_node *dn = dev->sysdata; - struct pci_dn *pdn = dn->data; - - if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number) - return dn; /* fast path. sysdata is good */ - return fetch_dev_dn(dev); + return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev); } static inline int pci_device_from_OF_node(struct device_node *np, @@ -248,7 +249,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) if (bus->self) return pci_device_to_OF_node(bus->self); else - return bus->sysdata; /* Must be root bus (PHB) */ + return bus->dev.of_node; /* Must be root bus (PHB) */ } /** Find the bus corresponding to the indicated device node */ @@ -260,14 +261,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus); /** Discover new pci devices under this bus, and add them */ extern void pcibios_add_pci_devices(struct pci_bus *bus); -static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) -{ - struct device_node *busdn = bus->sysdata; - - BUG_ON(busdn == NULL); - return PCI_DN(busdn)->phb; -} - extern void isa_bridge_find_early(struct pci_controller *hose); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index a20a9ad2258b..7d7790954e02 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -201,7 +201,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, extern void pcibios_setup_bus_devices(struct pci_bus *bus); extern void pcibios_setup_bus_self(struct pci_bus *bus); extern void pcibios_setup_phb_io_space(struct pci_controller *hose); -extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata); +extern void pcibios_scan_phb(struct pci_controller *hose); #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index d72757585595..c189aa5fe1f4 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -70,21 +70,6 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; } #endif #define of_node_to_nid of_node_to_nid -/** - * of_irq_map_pci - Resolve the interrupt for a PCI device - * @pdev: the device whose interrupt is to be resolved - * @out_irq: structure of_irq filled by this function - * - * This function resolves the PCI interrupt for a given PCI device. If a - * device-node exists for a given pci_dev, it will use normal OF tree - * walking. If not, it will implement standard swizzling and walk up the - * PCI tree until an device-node is found, at which point it will finish - * resolving using the OF tree walking. 
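/*
 * Illustrative sketch, not from the patch: pci_device_to_OF_node() above now
 * just reads the generic dev.of_node pointer and only falls back to the slow
 * fetch_dev_dn() tree walk when nothing was recorded at scan time.  A typical
 * (hypothetical) caller is unchanged by the conversion:
 */
#include <linux/of.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static const char *example_pci_node_name(struct pci_dev *pdev)
{
	struct device_node *dn = pci_device_to_OF_node(pdev);

	return dn ? dn->full_name : "<no OF node>";
}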
- */ -struct pci_dev; -struct of_irq; -extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); - extern void of_instantiate_rtc(void); /* These includes are put at the bottom because they may contain things diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h index 8447d89fbe72..bb1e2cdeb9bf 100644 --- a/arch/powerpc/include/asm/rwsem.h +++ b/arch/powerpc/include/asm/rwsem.h @@ -13,11 +13,6 @@ * by Paul Mackerras <paulus@samba.org>. */ -#include <linux/list.h> -#include <linux/spinlock.h> -#include <asm/atomic.h> -#include <asm/system.h> - /* * the semaphore definition */ @@ -33,47 +28,6 @@ #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -struct rw_semaphore { - long count; - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ \ - RWSEM_UNLOCKED_VALUE, \ - __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) \ -} - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ - do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ - } while (0) - /* * lock for reading */ @@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return sem->count != 0; -} - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_RWSEM_H */ diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index f62efdfd1769..c00d4ca1ee15 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -201,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv) /* If the driver uses devices that ibmebus doesn't know, add them */ ibmebus_create_devices(drv->driver.of_match_table); - return of_register_driver(drv, &ibmebus_bus_type); + drv->driver.bus = &ibmebus_bus_type; + return driver_register(&drv->driver); } EXPORT_SYMBOL(ibmebus_register_driver); void ibmebus_unregister_driver(struct of_platform_driver *drv) { - of_unregister_driver(drv); + driver_unregister(&drv->driver); } EXPORT_SYMBOL(ibmebus_unregister_driver); @@ -308,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, } } + static struct bus_attribute ibmebus_bus_attrs[] = { __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), __ATTR_NULL }; +static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) +{ + const struct of_device_id *matches = drv->of_match_table; + + if (!matches) + return 0; + + return of_match_device(matches, dev) != NULL; +} + +static int ibmebus_bus_device_probe(struct device *dev) +{ + int 
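/*
 * Illustrative sketch, not from the patch: the ibmebus rework above drops the
 * of_platform bus helpers -- drivers now set driver.bus = &ibmebus_bus_type
 * and call driver_register(), and the bus supplies its own match callback,
 * which is simply an of_match_device() lookup against the driver's table.
 * Reduced version for a hypothetical bus:
 */
#include <linux/device.h>
#include <linux/of_device.h>

static int example_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct of_device_id *matches = drv->of_match_table;

	if (!matches)
		return 0;			/* driver carries no OF table: never matches */

	return of_match_device(matches, dev) != NULL;
}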
error = -ENODEV; + struct of_platform_driver *drv; + struct platform_device *of_dev; + const struct of_device_id *match; + + drv = to_of_platform_driver(dev->driver); + of_dev = to_platform_device(dev); + + if (!drv->probe) + return error; + + of_dev_get(of_dev); + + match = of_match_device(drv->driver.of_match_table, dev); + if (match) + error = drv->probe(of_dev, match); + if (error) + of_dev_put(of_dev); + + return error; +} + +static int ibmebus_bus_device_remove(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + + if (dev->driver && drv->remove) + drv->remove(of_dev); + return 0; +} + +static void ibmebus_bus_device_shutdown(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + + if (dev->driver && drv->shutdown) + drv->shutdown(of_dev); +} + +/* + * ibmebus_bus_device_attrs + */ +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *ofdev; + + ofdev = to_platform_device(dev); + return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); +} + +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *ofdev; + + ofdev = to_platform_device(dev); + return sprintf(buf, "%s\n", ofdev->dev.of_node->name); +} + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); + buf[len] = '\n'; + buf[len+1] = 0; + return len+1; +} + +struct device_attribute ibmebus_bus_device_attrs[] = { + __ATTR_RO(devspec), + __ATTR_RO(name), + __ATTR_RO(modalias), + __ATTR_NULL +}; + +#ifdef CONFIG_PM_SLEEP +static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + int ret = 0; + + if (dev->driver && drv->suspend) + ret = drv->suspend(of_dev, mesg); + return ret; +} + +static int ibmebus_bus_legacy_resume(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + int ret = 0; + + if (dev->driver && drv->resume) + ret = drv->resume(of_dev); + return ret; +} + +static int ibmebus_bus_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +static void ibmebus_bus_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int ibmebus_bus_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int ibmebus_bus_pm_suspend_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend_noirq) + ret = drv->pm->suspend_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + 
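/*
 * Illustrative sketch, not from the patch: the ibmebus_bus_pm_*() callbacks
 * around this hunk all apply one dispatch rule -- prefer the driver's
 * dev_pm_ops hook if one exists, otherwise fall back to the legacy
 * suspend/resume methods; the *_noirq variants have no legacy fallback at
 * all.  Condensed version of that rule for a single phase, with a
 * hypothetical legacy helper assumed to be defined elsewhere:
 */
#include <linux/device.h>
#include <linux/pm.h>

int example_bus_legacy_suspend(struct device *dev, pm_message_t mesg);	/* hypothetical */

static int example_bus_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (!drv)
		return 0;				/* unbound device: nothing to do */

	if (drv->pm)					/* new-style dev_pm_ops wins */
		return drv->pm->suspend ? drv->pm->suspend(dev) : 0;

	return example_bus_legacy_suspend(dev, PMSG_SUSPEND);	/* old bus-specific method */
}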
+ if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_resume_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume_noirq) + ret = drv->pm->resume_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_SUSPEND */ + +#define ibmebus_bus_pm_suspend NULL +#define ibmebus_bus_pm_resume NULL +#define ibmebus_bus_pm_suspend_noirq NULL +#define ibmebus_bus_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int ibmebus_bus_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +static int ibmebus_bus_pm_freeze_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze_noirq) + ret = drv->pm->freeze_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_thaw_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw_noirq) + ret = drv->pm->thaw_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff_noirq) + ret = drv->pm->poweroff_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore) + ret = drv->pm->restore(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_restore_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore_noirq) + ret = drv->pm->restore_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_HIBERNATION */ + +#define ibmebus_bus_pm_freeze NULL +#define ibmebus_bus_pm_thaw NULL +#define ibmebus_bus_pm_poweroff NULL +#define ibmebus_bus_pm_restore NULL +#define ibmebus_bus_pm_freeze_noirq NULL +#define ibmebus_bus_pm_thaw_noirq NULL +#define ibmebus_bus_pm_poweroff_noirq NULL +#define ibmebus_bus_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { + .prepare = ibmebus_bus_pm_prepare, + .complete = ibmebus_bus_pm_complete, + .suspend = ibmebus_bus_pm_suspend, + .resume = ibmebus_bus_pm_resume, + .freeze = ibmebus_bus_pm_freeze, + .thaw = ibmebus_bus_pm_thaw, + 
.poweroff = ibmebus_bus_pm_poweroff, + .restore = ibmebus_bus_pm_restore, + .suspend_noirq = ibmebus_bus_pm_suspend_noirq, + .resume_noirq = ibmebus_bus_pm_resume_noirq, + .freeze_noirq = ibmebus_bus_pm_freeze_noirq, + .thaw_noirq = ibmebus_bus_pm_thaw_noirq, + .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, + .restore_noirq = ibmebus_bus_pm_restore_noirq, +}; + +#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) + +#else /* !CONFIG_PM_SLEEP */ + +#define IBMEBUS_BUS_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + struct bus_type ibmebus_bus_type = { + .name = "ibmebus", .uevent = of_device_uevent, - .bus_attrs = ibmebus_bus_attrs + .bus_attrs = ibmebus_bus_attrs, + .match = ibmebus_bus_bus_match, + .probe = ibmebus_bus_device_probe, + .remove = ibmebus_bus_device_remove, + .shutdown = ibmebus_bus_device_shutdown, + .dev_attrs = ibmebus_bus_device_attrs, + .pm = IBMEBUS_BUS_PM_OPS_PTR, }; EXPORT_SYMBOL(ibmebus_bus_type); @@ -326,7 +722,7 @@ static int __init ibmebus_bus_init(void) printk(KERN_INFO "IBM eBus Device Driver\n"); - err = of_bus_type_init(&ibmebus_bus_type, "ibmebus"); + err = bus_register(&ibmebus_bus_type); if (err) { printk(KERN_ERR "%s: failed to register IBM eBus.\n", __func__); diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index b2c363ef38ad..24582181b6ec 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -36,8 +36,7 @@ * lacking some bits needed here. */ -static int __devinit of_pci_phb_probe(struct platform_device *dev, - const struct of_device_id *match) +static int __devinit of_pci_phb_probe(struct platform_device *dev) { struct pci_controller *phb; @@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev, #endif /* CONFIG_EEH */ /* Scan the bus */ - pcibios_scan_phb(phb, dev->dev.of_node); + pcibios_scan_phb(phb); if (phb->bus == NULL) return -ENXIO; @@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = { {} }; -static struct of_platform_driver of_pci_phb_driver = { +static struct platform_driver of_pci_phb_driver = { .probe = of_pci_phb_probe, .driver = { .name = "of-pci", @@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = { static __init int of_pci_phb_init(void) { - return of_register_platform_driver(&of_pci_phb_driver); + return platform_driver_register(&of_pci_phb_driver); } device_initcall(of_pci_phb_init); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ebf9846f3c3b..f4adf89d7614 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -27,20 +27,6 @@ extern unsigned long __toc_start; #ifdef CONFIG_PPC_BOOK3S /* - * We only have to have statically allocated lppaca structs on - * legacy iSeries, which supports at most 64 cpus. - */ -#ifdef CONFIG_PPC_ISERIES -#if NR_CPUS < 64 -#define NR_LPPACAS NR_CPUS -#else -#define NR_LPPACAS 64 -#endif -#else /* not iSeries */ -#define NR_LPPACAS 1 -#endif - -/* * The structure which the hypervisor knows about - this structure * should not cross a page boundary. 
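/*
 * Illustrative sketch, not from the patch: the end state of the ibmebus
 * conversion above is a self-contained bus_type carrying its own match,
 * probe/remove/shutdown, device attributes and dev_pm_ops, registered with
 * plain bus_register() instead of of_bus_type_init().  Skeleton of the same
 * wiring for a hypothetical bus (callbacks assumed defined elsewhere):
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm.h>

int example_bus_match(struct device *dev, struct device_driver *drv);
int example_bus_device_probe(struct device *dev);
int example_bus_device_remove(struct device *dev);
extern const struct dev_pm_ops example_bus_dev_pm_ops;

static struct bus_type example_bus_type = {
	.name	= "examplebus",
	.match	= example_bus_match,
	.probe	= example_bus_device_probe,
	.remove	= example_bus_device_remove,
	.pm	= &example_bus_dev_pm_ops,	/* would be NULL when !CONFIG_PM_SLEEP */
};

static int __init example_bus_init(void)
{
	return bus_register(&example_bus_type);
}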
The vpa_init/register_vpa call * is now known to fail if the lppaca structure crosses a page diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 10a44e68ef11..3cd85faa8ac6 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/of_address.h> +#include <linux/of_pci.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> @@ -1687,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, /** * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure - * @sysdata: value to use for sysdata pointer. ppc32 and ppc64 differ here - * - * Note: the 'data' pointer is a temporary measure. As 32 and 64 bit - * pci code gets merged, this parameter should become unnecessary because - * both will use the same value. */ -void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) +void __devinit pcibios_scan_phb(struct pci_controller *hose) { struct pci_bus *bus; struct device_node *node = hose->dn; @@ -1703,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) node ? node->full_name : "<NO NAME>"); /* Create an empty bus for the toplevel */ - bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, - sysdata); + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); if (bus == NULL) { pr_err("Failed to create bus for PCI domain %04x\n", hose->global_number); return; } + bus->dev.of_node = of_node_get(node); bus->secondary = hose->first_busno; hose->bus = bus; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index e7db5b48004a..bedb370459f2 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -381,7 +381,7 @@ static int __init pcibios_init(void) if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; - pcibios_scan_phb(hose, hose); + pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 851577608a78..fc6452b6be9f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -64,7 +64,7 @@ static int __init pcibios_init(void) /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - pcibios_scan_phb(hose, hose->dn); + pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); } @@ -242,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, break; bus = NULL; } - if (bus == NULL || bus->sysdata == NULL) + if (bus == NULL || bus->dev.of_node == NULL) return -ENODEV; - hose_node = (struct device_node *)bus->sysdata; + hose_node = bus->dev.of_node; hose = PCI_DN(hose_node)->phb; switch (which) { diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index d56b35ee7f74..29852688ceaa 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -161,7 +161,7 @@ static void *is_devfn_node(struct device_node *dn, void *data) /* * This is the "slow" path for looking up a device_node from a * pci_dev. It will hunt for the device under its parent's - * phb and then update sysdata for a future fastpath. + * phb and then update of_node pointer. 
* * It may also do fixups on the actual device since this happens * on the first read/write. @@ -170,16 +170,19 @@ static void *is_devfn_node(struct device_node *dn, void *data) * In this case it may probe for real hardware ("just in case") * and add a device_node to the device tree if necessary. * + * Is this function necessary anymore now that dev->dev.of_node is + * used to store the node pointer? + * */ struct device_node *fetch_dev_dn(struct pci_dev *dev) { - struct device_node *orig_dn = dev->sysdata; + struct device_node *orig_dn = dev->dev.of_node; struct device_node *dn; unsigned long searchval = (dev->bus->number << 8) | dev->devfn; dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval); if (dn) - dev->sysdata = dn; + dev->dev.of_node = dn; return dn; } EXPORT_SYMBOL(fetch_dev_dn); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e751506323b4..1e89a72fd030 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); dev->bus = bus; - dev->sysdata = node; + dev->dev.of_node = of_node_get(node); dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; dev->devfn = devfn; @@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, bus->primary = dev->bus->number; bus->subordinate = busrange[1]; bus->bridge_ctl = 0; - bus->sysdata = node; + bus->dev.of_node = of_node_get(node); /* parse ranges property */ /* PCI #address-cells == 3 and #size-cells == 2 always */ diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index c2b7a07cc3d3..47187cc2cf00 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -2,95 +2,11 @@ #include <linux/kernel.h> #include <linux/string.h> -#include <linux/pci_regs.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/etherdevice.h> #include <linux/of_address.h> #include <asm/prom.h> -#include <asm/pci-bridge.h> - -#ifdef CONFIG_PCI -int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) -{ - struct device_node *dn, *ppnode; - struct pci_dev *ppdev; - u32 lspec; - u32 laddr[3]; - u8 pin; - int rc; - - /* Check if we have a device node, if yes, fallback to standard OF - * parsing - */ - dn = pci_device_to_OF_node(pdev); - if (dn) { - rc = of_irq_map_one(dn, 0, out_irq); - if (!rc) - return rc; - } - - /* Ok, we don't, time to have fun. Let's start by building up an - * interrupt spec. we assume #interrupt-cells is 1, which is standard - * for PCI. If you do different, then don't use that routine. - */ - rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); - if (rc != 0) - return rc; - /* No pin, exit */ - if (pin == 0) - return -ENODEV; - - /* Now we walk up the PCI tree */ - lspec = pin; - for (;;) { - /* Get the pci_dev of our parent */ - ppdev = pdev->bus->self; - - /* Ouch, it's a host bridge... */ - if (ppdev == NULL) { -#ifdef CONFIG_PPC64 - ppnode = pci_bus_to_OF_node(pdev->bus); -#else - struct pci_controller *host; - host = pci_bus_to_host(pdev->bus); - ppnode = host ? host->dn : NULL; -#endif - /* No node for host bridge ? give up */ - if (ppnode == NULL) - return -EINVAL; - } else - /* We found a P2P bridge, check if it has a node */ - ppnode = pci_device_to_OF_node(ppdev); - - /* Ok, we have found a parent with a device-node, hand over to - * the OF parsing code. 
- * We build a unit address from the linux device to be used for - * resolution. Note that we use the linux bus number which may - * not match your firmware bus numbering. - * Fortunately, in most cases, interrupt-map-mask doesn't include - * the bus number as part of the matching. - * You should still be careful about that though if you intend - * to rely on this function (you ship a firmware that doesn't - * create device nodes for all PCI devices). - */ - if (ppnode) - break; - - /* We can only get here if we hit a P2P bridge with no node, - * let's do standard swizzling and try again - */ - lspec = pci_swizzle_interrupt_pin(pdev, lspec); - pdev = ppdev; - } - - laddr[0] = (pdev->bus->number << 16) - | (pdev->devfn << 8); - laddr[1] = laddr[2] = 0; - return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); -} -EXPORT_SYMBOL_GPL(of_irq_map_pci); -#endif /* CONFIG_PCI */ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8a0deefac08d..b9150f07d266 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -160,7 +160,7 @@ SECTIONS INIT_RAM_FS } - PERCPU(PAGE_SIZE) + PERCPU(L1_CACHE_BYTES, PAGE_SIZE) . = ALIGN(8); .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index fd4812329570..0dc95c0aa3be 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1516,7 +1516,8 @@ int start_topology_update(void) { int rc = 0; - if (firmware_has_feature(FW_FEATURE_VPHN) && + /* Disabled until races with load balancing are fixed */ + if (0 && firmware_has_feature(FW_FEATURE_VPHN) && get_lppaca()->shared_proc) { vphn_enabled = 1; setup_cpu_associativity_change_counters(); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c index 0dad9a935eb5..1757d1db4b51 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c @@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } -static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct mpc52xx_gpio_wkup __iomem *regs; @@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { {} }; -static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { +static struct platform_driver mpc52xx_wkup_gpiochip_driver = { .driver = { .name = "gpio_wkup", .owner = THIS_MODULE, @@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } -static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct gpio_chip *gc; @@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { {} }; -static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { +static struct platform_driver mpc52xx_simple_gpiochip_driver = { .driver = { .name = "gpio", .owner = THIS_MODULE, @@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { static int __init mpc52xx_gpio_init(void) { - 
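/*
 * Illustrative sketch, not from the patch: most remaining hunks in this
 * section are the same mechanical conversion seen in mpc52xx_gpio above --
 * struct of_platform_driver becomes struct platform_driver, probe() loses the
 * const struct of_device_id *match argument, and
 * of_register_platform_driver()/of_unregister_platform_driver() become
 * platform_driver_register()/platform_driver_unregister(), with the OF match
 * table still supplied through driver.of_match_table.  Minimal post-conversion
 * skeleton for a hypothetical driver:
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *ofdev)
{
	/* the matched table entry is no longer passed in;
	 * most drivers only needed ofdev->dev.of_node anyway */
	return ofdev->dev.of_node ? 0 : -ENODEV;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-device" },	/* hypothetical */
	{},
};

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
};

static int __init example_driver_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_driver_init);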
if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver)) + if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); - if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver)) + if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) printk(KERN_ERR "Unable to register simple GPIO driver\n"); return 0; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index e0d703c7fdf7..859abf1c6d4b 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -721,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, /* --------------------------------------------------------------------- * of_platform bus binding code */ -static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) { struct mpc52xx_gpt_priv *gpt; @@ -781,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = { {} }; -static struct of_platform_driver mpc52xx_gpt_driver = { +static struct platform_driver mpc52xx_gpt_driver = { .driver = { .name = "mpc52xx-gpt", .owner = THIS_MODULE, @@ -793,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = { static int __init mpc52xx_gpt_init(void) { - if (of_register_platform_driver(&mpc52xx_gpt_driver)) - pr_err("error registering MPC52xx GPT driver\n"); - - return 0; + return platform_driver_register(&mpc52xx_gpt_driver); } /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f4ac213c89c0..6385d883cb8d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) } EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); -static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) { struct resource res; int rc = -ENOMEM; @@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { {}, }; -static struct of_platform_driver mpc52xx_lpbfifo_driver = { +static struct platform_driver mpc52xx_lpbfifo_driver = { .driver = { .name = "mpc52xx-lpbfifo", .owner = THIS_MODULE, @@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = { */ static int __init mpc52xx_lpbfifo_init(void) { - pr_debug("Registering LocalPlus bus FIFO driver\n"); - return of_register_platform_driver(&mpc52xx_lpbfifo_driver); + return platform_driver_register(&mpc52xx_lpbfifo_driver); } module_init(mpc52xx_lpbfifo_init); static void __exit mpc52xx_lpbfifo_exit(void) { - pr_debug("Unregistering LocalPlus bus FIFO driver\n"); - of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); + platform_driver_unregister(&mpc52xx_lpbfifo_driver); } module_exit(mpc52xx_lpbfifo_exit); diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 1565e0446dc8..10ff526cd046 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { .ops = &ep8248e_mdio_ops, }; -static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev, - const struct of_device_id 
*match) +static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) { struct mii_bus *bus; struct resource res; @@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = { {}, }; -static struct of_platform_driver ep8248e_mdio_driver = { +static struct platform_driver ep8248e_mdio_driver = { .driver = { .name = "ep8248e-mdio-bitbang", .owner = THIS_MODULE, @@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = { static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); - of_register_platform_driver(&ep8248e_mdio_driver); + platform_driver_register(&ep8248e_mdio_driver); return 0; } diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index fd4f2f2f19e6..188272934cfb 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -318,14 +318,18 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; -static int pmc_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int pmc_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct resource res; - struct pmc_type *type = match->data; + struct pmc_type *type; int ret = 0; + if (!ofdev->dev.of_match) + return -EINVAL; + + type = ofdev->dev.of_match->data; + if (!of_device_is_available(np)) return -ENODEV; @@ -422,7 +426,7 @@ static struct of_device_id pmc_match[] = { {} }; -static struct of_platform_driver pmc_driver = { +static struct platform_driver pmc_driver = { .driver = { .name = "mpc83xx-pmc", .owner = THIS_MODULE, @@ -434,7 +438,7 @@ static struct of_platform_driver pmc_driver = { static int pmc_init(void) { - return of_register_platform_driver(&pmc_driver); + return platform_driver_register(&pmc_driver); } module_init(pmc_init); diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index e3e379c6caa7..c35099af340e 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -328,7 +328,7 @@ static struct irq_host_ops msic_host_ops = { .map = msic_host_map, }; -static int axon_msi_shutdown(struct platform_device *device) +static void axon_msi_shutdown(struct platform_device *device) { struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; @@ -338,12 +338,9 @@ static int axon_msi_shutdown(struct platform_device *device) tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); - - return 0; } -static int axon_msi_probe(struct platform_device *device, - const struct of_device_id *device_id) +static int axon_msi_probe(struct platform_device *device) { struct device_node *dn = device->dev.of_node; struct axon_msic *msic; @@ -446,7 +443,7 @@ static const struct of_device_id axon_msi_device_id[] = { {} }; -static struct of_platform_driver axon_msi_driver = { +static struct platform_driver axon_msi_driver = { .probe = axon_msi_probe, .shutdown = axon_msi_shutdown, .driver = { @@ -458,7 +455,7 @@ static struct of_platform_driver axon_msi_driver = { static int __init axon_msi_init(void) { - return of_register_platform_driver(&axon_msi_driver); + return platform_driver_register(&axon_msi_driver); } subsys_initcall(axon_msi_init); diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 187a7d32f86a..a3d2ce54ea2e 100644 --- 
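/*
 * Illustrative sketch, not from the patch: where the old probe really used
 * its match argument (83xx suspend above, fsl_msi below), the converted code
 * reads the matched entry back through dev->dev.of_match, which the driver
 * core of this kernel fills in before calling probe.  Sketch of that pattern
 * with a hypothetical per-compatible data struct:
 */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct example_quirks {				/* hypothetical .data payload */
	int has_deep_sleep;
};

static int example_quirk_probe(struct platform_device *ofdev)
{
	const struct example_quirks *quirks;

	if (!ofdev->dev.of_match)		/* no of_device_id recorded for this device */
		return -EINVAL;

	quirks = ofdev->dev.of_match->data;	/* .data from the matching table entry */
	return (quirks && quirks->has_deep_sleep) ? 0 : -ENODEV;
}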
a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, if (!IS_ERR(tmp)) { struct nameidata nd; - ret = path_lookup(tmp, LOOKUP_PARENT, &nd); + ret = kern_path_parent(tmp, &nd); if (!ret) { nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; ret = spufs_create(&nd, flags, mode, neighbor); diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index fdb7384c0c4f..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c @@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); - for (i = 0; i < NR_CPUS; i++) { - if (lppaca_of(i).dyn_proc_status >= 2) + for (i = 0; i < NR_LPPACAS; i++) { + if (lppaca[i].dyn_proc_status >= 2) continue; snprintf(p, 32 - (p - buf), "@%d", i); @@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) dt_prop_str(dt, "device_type", device_type_cpu); - index = lppaca_of(i).dyn_hv_phys_proc_index; + index = lppaca[i].dyn_hv_phys_proc_index; d = &xIoHriProcessorVpd[index]; dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..2946ae10fbfd 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void) * on but calling this function multiple times is fine. */ identify_cpu(0, mfspr(SPRN_PVR)); + initialise_paca(&boot_paca, 0); powerpc_firmware_features |= FW_FEATURE_ISERIES; powerpc_firmware_features |= FW_FEATURE_LPAR; diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index a5d907b5a4c2..9886296e08da 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) } -static int __devinit gpio_mdio_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit gpio_mdio_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; @@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] = }; MODULE_DEVICE_TABLE(of, gpio_mdio_match); -static struct of_platform_driver gpio_mdio_driver = +static struct platform_driver gpio_mdio_driver = { .probe = gpio_mdio_probe, .remove = gpio_mdio_remove, @@ -326,13 +325,13 @@ int gpio_mdio_init(void) if (!gpio_regs) return -ENODEV; - return of_register_platform_driver(&gpio_mdio_driver); + return platform_driver_register(&gpio_mdio_driver); } module_init(gpio_mdio_init); void gpio_mdio_exit(void) { - of_unregister_platform_driver(&gpio_mdio_driver); + platform_driver_unregister(&gpio_mdio_driver); if (gpio_regs) iounmap(gpio_regs); } diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index bc8803664140..33867ec4a234 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -17,6 +17,54 @@ #include <asm/pSeries_reconfig.h> #include <asm/sparsemem.h> +static unsigned long get_memblock_size(void) +{ + struct device_node *np; + unsigned int memblock_size = 0; + + np = 
of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); + if (np) { + const unsigned long *size; + + size = of_get_property(np, "ibm,lmb-size", NULL); + memblock_size = size ? *size : 0; + + of_node_put(np); + } else { + unsigned int memzero_size = 0; + const unsigned int *regs; + + np = of_find_node_by_path("/memory@0"); + if (np) { + regs = of_get_property(np, "reg", NULL); + memzero_size = regs ? regs[3] : 0; + of_node_put(np); + } + + if (memzero_size) { + /* We now know the size of memory@0, use this to find + * the first memoryblock and get its size. + */ + char buf[64]; + + sprintf(buf, "/memory@%x", memzero_size); + np = of_find_node_by_path(buf); + if (np) { + regs = of_get_property(np, "reg", NULL); + memblock_size = regs ? regs[3] : 0; + of_node_put(np); + } + } + } + + return memblock_size; +} + +unsigned long memory_block_size_bytes(void) +{ + return get_memblock_size(); +} + static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long start, start_pfn; @@ -127,30 +175,22 @@ static int pseries_add_memory(struct device_node *np) static int pseries_drconf_memory(unsigned long *base, unsigned int action) { - struct device_node *np; - const unsigned long *lmb_size; + unsigned long memblock_size; int rc; - np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); - if (!np) + memblock_size = get_memblock_size(); + if (!memblock_size) return -EINVAL; - lmb_size = of_get_property(np, "ibm,lmb-size", NULL); - if (!lmb_size) { - of_node_put(np); - return -EINVAL; - } - if (action == PSERIES_DRCONF_MEM_ADD) { - rc = memblock_add(*base, *lmb_size); + rc = memblock_add(*base, memblock_size); rc = (rc < 0) ? -EINVAL : 0; } else if (action == PSERIES_DRCONF_MEM_REMOVE) { - rc = pseries_remove_memblock(*base, *lmb_size); + rc = pseries_remove_memblock(*base, memblock_size); } else { rc = -EINVAL; } - of_node_put(np); return rc; } diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 5fcc92a12d3e..3bf4488aaec6 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -149,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) if (dn->child) eeh_add_device_tree_early(dn); - pcibios_scan_phb(phb, dn); + pcibios_scan_phb(phb); pcibios_finish_adding_to_bus(phb->bus); return phb; diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 2659a60bd7b8..27402c7d309d 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -172,10 +172,9 @@ static const struct block_device_operations axon_ram_devops = { /** * axon_ram_probe - probe() method for platform driver - * @device, @device_id: see of_platform_driver method + * @device: see platform_driver method */ -static int axon_ram_probe(struct platform_device *device, - const struct of_device_id *device_id) +static int axon_ram_probe(struct platform_device *device) { static int axon_ram_bank_id = -1; struct axon_ram_bank *bank; @@ -326,7 +325,7 @@ static struct of_device_id axon_ram_device_id[] = { {} }; -static struct of_platform_driver axon_ram_driver = { +static struct platform_driver axon_ram_driver = { .probe = axon_ram_probe, .remove = axon_ram_remove, .driver = { @@ -350,7 +349,7 @@ axon_ram_init(void) } azfs_minor = 0; - return of_register_platform_driver(&axon_ram_driver); + return platform_driver_register(&axon_ram_driver); } /** @@ -359,7 +358,7 @@ axon_ram_init(void) static void __exit axon_ram_exit(void) { - 
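/*
 * Illustrative sketch, not from the patch: the hotplug-memory.c hunk above
 * centralises the LMB size lookup in get_memblock_size() and exposes it as
 * memory_block_size_bytes() (0 means the size could not be determined), so
 * the dynamic-reconfiguration path no longer re-reads the device tree itself.
 * Hypothetical caller following the same convention:
 */
#include <linux/errno.h>
#include <linux/memblock.h>

extern unsigned long memory_block_size_bytes(void);	/* hook defined in the hunk above */

static int example_add_lmb(unsigned long base)
{
	unsigned long block_size = memory_block_size_bytes();

	if (!block_size)		/* neither ibm,lmb-size nor memory@0 gave a size */
		return -EINVAL;

	return memblock_add(base, block_size) < 0 ? -EINVAL : 0;
}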
of_unregister_platform_driver(&axon_ram_driver); + platform_driver_unregister(&axon_ram_driver); unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); } diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index 650256115064..b3fbb271be87 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c @@ -365,8 +365,7 @@ bcom_engine_cleanup(void) /* OF platform driver */ /* ======================================================================== */ -static int __devinit mpc52xx_bcom_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit mpc52xx_bcom_probe(struct platform_device *op) { struct device_node *ofn_sram; struct resource res_bcom; @@ -492,7 +491,7 @@ static struct of_device_id mpc52xx_bcom_of_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); -static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { +static struct platform_driver mpc52xx_bcom_of_platform_driver = { .probe = mpc52xx_bcom_probe, .remove = mpc52xx_bcom_remove, .driver = { @@ -510,13 +509,13 @@ static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { static int __init mpc52xx_bcom_init(void) { - return of_register_platform_driver(&mpc52xx_bcom_of_platform_driver); + return platform_driver_register(&mpc52xx_bcom_of_platform_driver); } static void __exit mpc52xx_bcom_exit(void) { - of_unregister_platform_driver(&mpc52xx_bcom_of_platform_driver); + platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); } /* If we're not a module, we must make sure everything is setup before */ diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c index cc8d6556d799..2b9f0c925326 100644 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c @@ -71,8 +71,7 @@ static int __init get_offset_from_cmdline(char *str) __setup("cache-sram-size=", get_size_from_cmdline); __setup("cache-sram-offset=", get_offset_from_cmdline); -static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev, - const struct of_device_id *match) +static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) { long rval; unsigned int rem; @@ -204,7 +203,7 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = { {}, }; -static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = { +static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { .driver = { .name = "fsl-l2ctlr", .owner = THIS_MODULE, @@ -216,12 +215,12 @@ static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = { static __init int mpc85xx_l2ctlr_of_init(void) { - return of_register_platform_driver(&mpc85xx_l2ctlr_of_platform_driver); + return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); } static void __exit mpc85xx_l2ctlr_of_exit(void) { - of_unregister_platform_driver(&mpc85xx_l2ctlr_of_platform_driver); + platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); } subsys_initcall(mpc85xx_l2ctlr_of_init); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 108d76fa8f1c..ee6a8a52ac71 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -273,8 +273,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) return 0; } -static int __devinit fsl_of_msi_probe(struct platform_device *dev, - const struct of_device_id *match) +static int __devinit fsl_of_msi_probe(struct platform_device *dev) { struct fsl_msi *msi; struct resource res; @@ -282,11 +281,15 @@ 
static int __devinit fsl_of_msi_probe(struct platform_device *dev, int rc; int virt_msir; const u32 *p; - struct fsl_msi_feature *features = match->data; + struct fsl_msi_feature *features; struct fsl_msi_cascade_data *cascade_data = NULL; int len; u32 offset; + if (!dev->dev.of_match) + return -EINVAL; + features = dev->dev.of_match->data; + printk(KERN_DEBUG "Setting up Freescale MSI support\n"); msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); @@ -411,7 +414,7 @@ static const struct of_device_id fsl_of_msi_ids[] = { {} }; -static struct of_platform_driver fsl_of_msi_driver = { +static struct platform_driver fsl_of_msi_driver = { .driver = { .name = "fsl-msi", .owner = THIS_MODULE, @@ -423,7 +426,7 @@ static struct of_platform_driver fsl_of_msi_driver = { static __init int fsl_of_msi_init(void) { - return of_register_platform_driver(&fsl_of_msi_driver); + return platform_driver_register(&fsl_of_msi_driver); } subsys_initcall(fsl_of_msi_init); diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index e9381bfefb21..f122e8961d32 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -58,8 +58,7 @@ static const struct platform_suspend_ops pmc_suspend_ops = { .enter = pmc_suspend_enter, }; -static int pmc_probe(struct platform_device *ofdev, - const struct of_device_id *id) +static int pmc_probe(struct platform_device *ofdev) { pmc_regs = of_iomap(ofdev->dev.of_node, 0); if (!pmc_regs) @@ -76,7 +75,7 @@ static const struct of_device_id pmc_ids[] = { { }, }; -static struct of_platform_driver pmc_driver = { +static struct platform_driver pmc_driver = { .driver = { .name = "fsl-pmc", .owner = THIS_MODULE, @@ -87,6 +86,6 @@ static struct of_platform_driver pmc_driver = { static int __init pmc_init(void) { - return of_register_platform_driver(&pmc_driver); + return platform_driver_register(&pmc_driver); } device_initcall(pmc_init); diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 8c6cab013278..3eff2c3a9ad5 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -1570,8 +1570,7 @@ err_ops: /* The probe function for RapidIO peer-to-peer network. 
*/ -static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev, - const struct of_device_id *match) +static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) { int rc; printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", @@ -1594,7 +1593,7 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = { {}, }; -static struct of_platform_driver fsl_of_rio_rpn_driver = { +static struct platform_driver fsl_of_rio_rpn_driver = { .driver = { .name = "fsl-of-rio", .owner = THIS_MODULE, @@ -1605,7 +1604,7 @@ static struct of_platform_driver fsl_of_rio_rpn_driver = { static __init int fsl_of_rio_rpn_init(void) { - return of_register_platform_driver(&fsl_of_rio_rpn_driver); + return platform_driver_register(&fsl_of_rio_rpn_driver); } subsys_initcall(fsl_of_rio_rpn_init); diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 4260f368db52..8ce4fc3d9828 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c @@ -121,8 +121,7 @@ static void pmi_notify_handlers(struct work_struct *work) spin_unlock(&data->handler_spinlock); } -static int pmi_of_probe(struct platform_device *dev, - const struct of_device_id *match) +static int pmi_of_probe(struct platform_device *dev) { struct device_node *np = dev->dev.of_node; int rc; @@ -205,7 +204,7 @@ static int pmi_of_remove(struct platform_device *dev) return 0; } -static struct of_platform_driver pmi_of_platform_driver = { +static struct platform_driver pmi_of_platform_driver = { .probe = pmi_of_probe, .remove = pmi_of_remove, .driver = { @@ -217,13 +216,13 @@ static struct of_platform_driver pmi_of_platform_driver = { static int __init pmi_module_init(void) { - return of_register_platform_driver(&pmi_of_platform_driver); + return platform_driver_register(&pmi_of_platform_driver); } module_init(pmi_module_init); static void __exit pmi_module_exit(void) { - of_unregister_platform_driver(&pmi_of_platform_driver); + platform_driver_unregister(&pmi_of_platform_driver); } module_exit(pmi_module_exit); diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 90020de4dcf2..904c6cbaf45b 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -659,8 +659,7 @@ static int qe_resume(struct platform_device *ofdev) return 0; } -static int qe_probe(struct platform_device *ofdev, - const struct of_device_id *id) +static int qe_probe(struct platform_device *ofdev) { return 0; } @@ -670,7 +669,7 @@ static const struct of_device_id qe_ids[] = { { }, }; -static struct of_platform_driver qe_driver = { +static struct platform_driver qe_driver = { .driver = { .name = "fsl-qe", .owner = THIS_MODULE, @@ -682,7 +681,7 @@ static struct of_platform_driver qe_driver = { static int __init qe_drv_init(void) { - return of_register_platform_driver(&qe_driver); + return platform_driver_register(&qe_driver); } device_initcall(qe_drv_init); #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 5c5d02de49e9..81cf36b691f1 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -7,7 +7,7 @@ #include <linux/uaccess.h> #include <asm/errno.h> -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if 
(encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, - int oldval, int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); + return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 423fdda2322d..d0eb4653cebd 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h @@ -43,29 +43,6 @@ #ifdef __KERNEL__ -#include <linux/list.h> -#include <linux/spinlock.h> - -struct rwsem_waiter; - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *); - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #ifndef __s390x__ #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 @@ -81,41 +58,6 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) /* - * initialisation - */ - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - - -/* * lock for reading */ static inline void __down_read(struct rw_semaphore *sem) @@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) return new; } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _S390_RWSEM_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d6b1ed0ec52b..2d9ea11f919a 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -83,8 +83,8 @@ struct uaccess_ops { size_t (*clear_user)(size_t, void __user *); size_t (*strnlen_user)(size_t, const char __user *); size_t (*strncpy_from_user)(size_t, const char __user *, char *); - int (*futex_atomic_op)(int op, int __user *, int 
oparg, int *old); - int (*futex_atomic_cmpxchg)(int __user *, int old, int new); + int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); + int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); }; extern struct uaccess_ops uaccess; diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a68ac10213b2..1bc18cdb525b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -77,7 +77,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) - PERCPU(PAGE_SIZE) + PERCPU(0x100, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index 126011df14f1..1d2536cb630b 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); extern size_t copy_to_user_std(size_t, void __user *, const void *); extern size_t strnlen_user_std(size_t, const char __user *); extern size_t strncpy_from_user_std(size_t, const char __user *, char *); -extern int futex_atomic_cmpxchg_std(int __user *, int, int); -extern int futex_atomic_op_std(int, int __user *, int, int *); +extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); +extern int futex_atomic_op_std(int, u32 __user *, int, int *); extern size_t copy_from_user_pt(size_t, const void __user *, void *); extern size_t copy_to_user_pt(size_t, void __user *, const void *); -extern int futex_atomic_op_pt(int, int __user *, int, int *); -extern int futex_atomic_cmpxchg_pt(int __user *, int, int); +extern int futex_atomic_op_pt(int, u32 __user *, int, int *); +extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 404f2de296dc..74833831417f 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -302,7 +302,7 @@ fault: : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc" ); -static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) +static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) +int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) { int ret; @@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) return ret; } -static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; asm volatile("0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2:\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } -int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; if (segment_eq(get_fs(), KERNEL_DS)) - return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); spin_lock(¤t->mm->page_table_lock); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); if (!uaddr) { @@ -382,7 +385,7 @@ int 
futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) } get_page(virt_to_page(uaddr)); spin_unlock(¤t->mm->page_table_lock); - ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); put_page(virt_to_page(uaddr)); return ret; } diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index a6c4f7ed24a4..bb1a7eed42ce 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c @@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc"); -int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) +int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; asm volatile( " sacf 256\n" "0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2: sacf 0\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h index a9f16a7f9aea..6cb9f193a95e 100644 --- a/arch/sh/include/asm/futex-irq.h +++ b/arch/sh/include/asm/futex-irq.h @@ -3,7 +3,7 @@ #include <asm/system.h> -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, - int oldval, int newval) +static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, + u32 __user *uaddr, + u32 oldval, u32 newval) { unsigned long flags; - int ret, prev = 0; + int ret; + u32 prev = 0; local_irq_save(flags); @@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, local_irq_restore(flags); - if (ret) - return ret; - - return prev; + *uval = prev; + return ret; 
} #endif /* __ASM_SH_FUTEX_IRQ_H */ diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 68256ec5fa35..7be39a646fbd 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -10,7 +10,7 @@ /* XXX: UP variants, fix for SH-4A and SMP.. */ #include <asm/futex-irq.h> -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); + return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/sh/include/asm/ioctls.h b/arch/sh/include/asm/ioctls.h index 84e85a792638..a6769f352bf6 100644 --- a/arch/sh/include/asm/ioctls.h +++ b/arch/sh/include/asm/ioctls.h @@ -87,6 +87,7 @@ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP _IO('T', 0x37) #define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ #define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h index 06e2251a5e48..edab57265293 100644 --- a/arch/sh/include/asm/rwsem.h +++ b/arch/sh/include/asm/rwsem.h @@ -11,64 +11,13 @@ #endif #ifdef __KERNEL__ -#include <linux/list.h> -#include <linux/spinlock.h> -#include <asm/atomic.h> -#include <asm/system.h> -/* - * the semaphore definition - */ -struct rw_semaphore { - long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -extern void __init_rwsem(struct rw_semaphore *sem, 
const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} /* * lock for reading @@ -179,10 +128,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _ASM_SH_RWSEM_H */ diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index f739061e2ee4..0f325da0f923 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h @@ -1,11 +1,21 @@ #ifndef __ASM_SH_ETH_H__ #define __ASM_SH_ETH_H__ +#include <linux/phy.h> + enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; +enum { + SH_ETH_REG_GIGABIT, + SH_ETH_REG_FAST_SH4, + SH_ETH_REG_FAST_SH3_SH2 +}; struct sh_eth_plat_data { int phy; int edmac_endian; + int register_type; + phy_interface_t phy_interface; + void (*set_mdio_gate)(unsigned long addr); unsigned char mac_addr[6]; unsigned no_ether_link:1; diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 7f8a709c3ada..af4d46187a79 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -66,7 +66,7 @@ SECTIONS __machvec_end = .; } - PERCPU(PAGE_SIZE) + PERCPU(L1_CACHE_BYTES, PAGE_SIZE) /* * .exit.text is discarded at runtime, not link time, to deal with diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h index 38f37b333cc7..d0b83f66f356 100644 --- a/arch/sparc/include/asm/fcntl.h +++ b/arch/sparc/include/asm/fcntl.h @@ -34,6 +34,8 @@ #define __O_SYNC 0x800000 #define O_SYNC (__O_SYNC|O_DSYNC) +#define O_PATH 0x1000000 + #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. 
*/ #define F_GETLK 7 diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h index 47f95839dc69..444e7bea23bc 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h @@ -30,7 +30,7 @@ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ : "memory") -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) return -EFAULT; if (unlikely((((unsigned long) uaddr) & 0x3UL))) return -EINVAL; @@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { + int ret = 0; + __asm__ __volatile__( - "\n1: casa [%3] %%asi, %2, %0\n" + "\n1: casa [%4] %%asi, %3, %1\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %0\n" " jmpl %0 + %%lo(2b), %%g0\n" - " mov %4, %0\n" + " mov %5, %0\n" " .previous\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" - : "=r" (newval) - : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) + : "+r" (ret), "=r" (newval) + : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) : "memory"); - return newval; + *uval = newval; + return ret; } #endif /* !(_SPARC64_FUTEX_H) */ diff --git a/arch/sparc/include/asm/ioctls.h b/arch/sparc/include/asm/ioctls.h index ed3807b96bb5..28d0c8b02cc3 100644 --- a/arch/sparc/include/asm/ioctls.h +++ b/arch/sparc/include/asm/ioctls.h @@ -20,6 +20,7 @@ #define TCSETSW2 _IOW('T', 14, struct termios2) #define TCSETSF2 _IOW('T', 15, struct termios2) #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ +#define TIOCVHANGUP _IO('T', 0x37) /* Note that all the ioctls that are not available in Linux have a * double underscore on the front to: a) avoid some programs to diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index aa4c82648d88..cb33608cc68f 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -103,7 +103,7 @@ static inline unsigned int get_dma_residue(unsigned int dmanr) return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); } -static int __devinit ecpp_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit ecpp_probe(struct platform_device *op) { unsigned long base = op->resource[0].start; unsigned long config = op->resource[1].start; @@ -235,7 +235,7 @@ static const struct of_device_id ecpp_match[] = { {}, }; -static struct of_platform_driver ecpp_driver = { +static struct platform_driver ecpp_driver = { .driver = { .name = "ecpp", .owner = THIS_MODULE, @@ -247,7 +247,7 @@ static struct of_platform_driver ecpp_driver = { static int parport_pc_find_nonpci_ports(int autoirq, int autodma) { - return of_register_platform_driver(&ecpp_driver); + return platform_driver_register(&ecpp_driver); } #endif /* !(_ASM_SPARC64_PARPORT_H */ diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index a2b4302869bc..069bf4d663a1 100644 
--- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h @@ -13,53 +13,12 @@ #ifdef __KERNEL__ -#include <linux/list.h> -#include <linux/spinlock.h> - -struct rwsem_waiter; - -struct rw_semaphore { - signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000L #define RWSEM_ACTIVE_BIAS 0x00000001L #define RWSEM_ACTIVE_MASK 0xffffffffL #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) /* * lock for reading @@ -160,11 +119,6 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) return atomic64_add_return(delta, (atomic64_t *)(&sem->count)); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _SPARC64_RWSEM_H */ diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 52de4a9424e8..f679c57644d5 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -137,8 +137,7 @@ static const struct file_operations apc_fops = { static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; -static int __devinit apc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit apc_probe(struct platform_device *op) { int err; @@ -174,7 +173,7 @@ static struct of_device_id __initdata apc_match[] = { }; MODULE_DEVICE_TABLE(of, apc_match); -static struct of_platform_driver apc_driver = { +static struct platform_driver apc_driver = { .driver = { .name = "apc", .owner = THIS_MODULE, @@ -185,7 +184,7 @@ static struct of_platform_driver apc_driver = { static int __init apc_init(void) { - return of_register_platform_driver(&apc_driver); + return platform_driver_register(&apc_driver); } /* This driver is not critical to the boot process diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 3efd3c5af6a9..2abace076c7d 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = { MODULE_DEVICE_TABLE(of, auxio_match); -static int __devinit auxio_probe(struct platform_device *dev, - const struct of_device_id *match) +static int __devinit auxio_probe(struct platform_device *dev) { struct device_node *dp = dev->dev.of_node; unsigned long size; @@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev, 
return 0; } -static struct of_platform_driver auxio_driver = { +static struct platform_driver auxio_driver = { .probe = auxio_probe, .driver = { .name = "auxio", @@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = { static int __init auxio_init(void) { - return of_register_platform_driver(&auxio_driver); + return platform_driver_register(&auxio_driver); } /* Must be after subsys_initcall() so that busses are probed. Must diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index cfa2624c5332..136d3718a74a 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -59,8 +59,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p) } } -static int __devinit clock_board_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit clock_board_probe(struct platform_device *op) { struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); int err = -ENOMEM; @@ -148,7 +147,7 @@ static struct of_device_id __initdata clock_board_match[] = { {}, }; -static struct of_platform_driver clock_board_driver = { +static struct platform_driver clock_board_driver = { .probe = clock_board_probe, .driver = { .name = "clock_board", @@ -157,8 +156,7 @@ static struct of_platform_driver clock_board_driver = { }, }; -static int __devinit fhc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit fhc_probe(struct platform_device *op) { struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); int err = -ENOMEM; @@ -254,7 +252,7 @@ static struct of_device_id __initdata fhc_match[] = { {}, }; -static struct of_platform_driver fhc_driver = { +static struct platform_driver fhc_driver = { .probe = fhc_probe, .driver = { .name = "fhc", @@ -265,8 +263,8 @@ static struct of_platform_driver fhc_driver = { static int __init sunfire_init(void) { - (void) of_register_platform_driver(&fhc_driver); - (void) of_register_platform_driver(&clock_board_driver); + (void) platform_driver_register(&fhc_driver); + (void) platform_driver_register(&clock_board_driver); return 0; } diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 08c466ebb32b..668c7be5d365 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, } } -static int __devinit jbusmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; @@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p) chmc_read_mcreg(p, CHMCTRL_DECODE4)); } -static int __devinit chmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; @@ -765,13 +763,12 @@ out_free: goto out; } -static int __devinit us3mc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) - return chmc_probe(op, match); + return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) - return jbusmc_probe(op, match); + return jbusmc_probe(op); return -ENODEV; } @@ -810,7 +807,7 @@ static const struct of_device_id us3mc_match[] = { }; MODULE_DEVICE_TABLE(of, us3mc_match); -static struct of_platform_driver us3mc_driver = { +static struct platform_driver us3mc_driver = { .driver = { 
.name = "us3mc", .owner = THIS_MODULE, @@ -848,7 +845,7 @@ static int __init us3mc_init(void) ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { - ret = of_register_platform_driver(&us3mc_driver); + ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } @@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); - of_unregister_platform_driver(&us3mc_driver); + platform_driver_unregister(&us3mc_driver); } } diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index efb896d68754..be5e2441c6d7 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -455,8 +455,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm, return 0; } -static int __devinit fire_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit fire_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -507,7 +506,7 @@ static struct of_device_id __initdata fire_match[] = { {}, }; -static struct of_platform_driver fire_driver = { +static struct platform_driver fire_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -518,7 +517,7 @@ static struct of_platform_driver fire_driver = { static int __init fire_init(void) { - return of_register_platform_driver(&fire_driver); + return platform_driver_register(&fire_driver); } subsys_initcall(fire_init); diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c index 22eab7cf3b11..56ee745064de 100644 --- a/arch/sparc/kernel/pci_psycho.c +++ b/arch/sparc/kernel/pci_psycho.c @@ -503,8 +503,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid) #define PSYCHO_CONFIGSPACE 0x001000000UL -static int __devinit psycho_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit psycho_probe(struct platform_device *op) { const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; @@ -601,7 +600,7 @@ static struct of_device_id __initdata psycho_match[] = { {}, }; -static struct of_platform_driver psycho_driver = { +static struct platform_driver psycho_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -612,7 +611,7 @@ static struct of_platform_driver psycho_driver = { static int __init psycho_init(void) { - return of_register_platform_driver(&psycho_driver); + return platform_driver_register(&psycho_driver); } subsys_initcall(psycho_init); diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 5c3f5ec4cabc..2857073342d2 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c @@ -452,8 +452,7 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, sabre_scan_bus(pbm, &op->dev); } -static int __devinit sabre_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit sabre_probe(struct platform_device *op) { const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; @@ -464,7 +463,7 @@ static int __devinit sabre_probe(struct platform_device *op, const u32 *vdma; u64 clear_irq; - hummingbird_p = (match->data != NULL); + hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; @@ -595,7 +594,7 @@ static struct of_device_id __initdata sabre_match[] = { {}, }; -static struct of_platform_driver sabre_driver = { +static 
struct platform_driver sabre_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -606,7 +605,7 @@ static struct of_platform_driver sabre_driver = { static int __init sabre_init(void) { - return of_register_platform_driver(&sabre_driver); + return platform_driver_register(&sabre_driver); } subsys_initcall(sabre_init); diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 445a47a2fb3d..6783410ceb02 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -1460,10 +1460,11 @@ out_err: return err; } -static int __devinit schizo_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit schizo_probe(struct platform_device *op) { - return __schizo_init(op, (unsigned long) match->data); + if (!op->dev.of_match) + return -EINVAL; + return __schizo_init(op, (unsigned long) op->dev.of_match->data); } /* The ordering of this table is very important. Some Tomatillo @@ -1490,7 +1491,7 @@ static struct of_device_id __initdata schizo_match[] = { {}, }; -static struct of_platform_driver schizo_driver = { +static struct platform_driver schizo_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -1501,7 +1502,7 @@ static struct of_platform_driver schizo_driver = { static int __init schizo_init(void) { - return of_register_platform_driver(&schizo_driver); + return platform_driver_register(&schizo_driver); } subsys_initcall(schizo_init); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 743344aa6d8a..158cd739b263 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -918,8 +918,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm, return 0; } -static int __devinit pci_sun4v_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit pci_sun4v_probe(struct platform_device *op) { const struct linux_prom64_registers *regs; static int hvapi_negotiated = 0; @@ -1008,7 +1007,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = { {}, }; -static struct of_platform_driver pci_sun4v_driver = { +static struct platform_driver pci_sun4v_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -1019,7 +1018,7 @@ static struct of_platform_driver pci_sun4v_driver = { static int __init pci_sun4v_init(void) { - return of_register_platform_driver(&pci_sun4v_driver); + return platform_driver_register(&pci_sun4v_driver); } subsys_initcall(pci_sun4v_init); diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index aeaa09a3c655..2cdc131b50ac 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -700,10 +700,8 @@ static void pcic_clear_clock_irq(void) static irqreturn_t pcic_timer_handler (int irq, void *h) { - write_seqlock(&xtime_lock); /* Dummy, to show that we remember */ pcic_clear_clock_irq(); - do_timer(1); - write_sequnlock(&xtime_lock); + xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 94536a85f161..93d7b4465f8d 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -51,8 +51,7 @@ static void pmc_swift_idle(void) #endif } -static int __devinit pmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit pmc_probe(struct platform_device *op) { regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), PMC_OBPNAME); @@ -78,7 +77,7 @@ static struct of_device_id __initdata 
pmc_match[] = { }; MODULE_DEVICE_TABLE(of, pmc_match); -static struct of_platform_driver pmc_driver = { +static struct platform_driver pmc_driver = { .driver = { .name = "pmc", .owner = THIS_MODULE, @@ -89,7 +88,7 @@ static struct of_platform_driver pmc_driver = { static int __init pmc_init(void) { - return of_register_platform_driver(&pmc_driver); + return platform_driver_register(&pmc_driver); } /* This driver is not critical to the boot process diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index 2c59f4d387dd..cd725fe238b2 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c @@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node * return 1; } -static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit power_probe(struct platform_device *op) { struct resource *res = &op->resource[0]; unsigned int irq = op->archdata.irqs[0]; @@ -59,7 +59,7 @@ static struct of_device_id __initdata power_match[] = { {}, }; -static struct of_platform_driver power_driver = { +static struct platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", @@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = { static int __init power_init(void) { - return of_register_platform_driver(&power_driver); + return platform_driver_register(&power_driver); } device_initcall(power_init); diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 9c743b1886ff..19ab42a932db 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now) /* * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick + * as well as call the "xtime_update()" routine every clocktick */ #define TICK_SIZE (tick_nsec / 1000) @@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id) profile_tick(CPU_PROFILING); #endif - /* Protect counter clear so that do_gettimeoffset works */ - write_seqlock(&xtime_lock); - clear_clock_irq(); - do_timer(1); - - write_sequnlock(&xtime_lock); + xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); @@ -142,7 +137,7 @@ static struct platform_device m48t59_rtc = { }, }; -static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit clock_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); @@ -176,7 +171,7 @@ static struct of_device_id __initdata clock_match[] = { {}, }; -static struct of_platform_driver clock_driver = { +static struct platform_driver clock_driver = { .probe = clock_probe, .driver = { .name = "rtc", @@ -189,7 +184,7 @@ static struct of_platform_driver clock_driver = { /* Probe for the mostek real time clock chip. */ static int __init clock_init(void) { - return of_register_platform_driver(&clock_driver); + return platform_driver_register(&clock_driver); } /* Must be after subsys_initcall() so that busses are probed. 
Must * be before device_initcall() because things like the RTC driver diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 3bc9c9979b92..e1862793a61d 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = { .num_resources = 1, }; -static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit rtc_probe(struct platform_device *op) { struct resource *r; @@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = { {}, }; -static struct of_platform_driver rtc_driver = { +static struct platform_driver rtc_driver = { .probe = rtc_probe, .driver = { .name = "rtc", @@ -477,7 +477,7 @@ static struct platform_device rtc_bq4802_device = { .num_resources = 1, }; -static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit bq4802_probe(struct platform_device *op) { printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", @@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = { {}, }; -static struct of_platform_driver bq4802_driver = { +static struct platform_driver bq4802_driver = { .probe = bq4802_probe, .driver = { .name = "bq4802", @@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = { }, }; -static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit mostek_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; @@ -559,7 +559,7 @@ static struct of_device_id __initdata mostek_match[] = { {}, }; -static struct of_platform_driver mostek_driver = { +static struct platform_driver mostek_driver = { .probe = mostek_probe, .driver = { .name = "mostek", @@ -586,9 +586,9 @@ static int __init clock_init(void) if (tlb_type == hypervisor) return platform_device_register(&rtc_sun4v_device); - (void) of_register_platform_driver(&rtc_driver); - (void) of_register_platform_driver(&mostek_driver); - (void) of_register_platform_driver(&bq4802_driver); + (void) platform_driver_register(&rtc_driver); + (void) platform_driver_register(&mostek_driver); + (void) platform_driver_register(&bq4802_driver); return 0; } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 0c1e6783657f..92b557afe535 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -108,7 +108,7 @@ SECTIONS __sun4v_2insn_patch_end = .; } - PERCPU(PAGE_SIZE) + PERCPU(SMP_CACHE_BYTES, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index cbddeb38ffda..d3c7a12ad879 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -16,7 +16,7 @@ #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)]) spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = { - [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED + [0 ... 
(ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash) }; #else /* SMP */ diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index fe0d10dcae57..d03ec124a598 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h @@ -29,16 +29,16 @@ #include <linux/uaccess.h> #include <linux/errno.h> -extern struct __get_user futex_set(int __user *v, int i); -extern struct __get_user futex_add(int __user *v, int n); -extern struct __get_user futex_or(int __user *v, int n); -extern struct __get_user futex_andn(int __user *v, int n); -extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); +extern struct __get_user futex_set(u32 __user *v, int i); +extern struct __get_user futex_add(u32 __user *v, int n); +extern struct __get_user futex_or(u32 __user *v, int n); +extern struct __get_user futex_andn(u32 __user *v, int n); +extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); #ifndef __tilegx__ -extern struct __get_user futex_xor(int __user *v, int n); +extern struct __get_user futex_xor(u32 __user *v, int n); #else -static inline struct __get_user futex_xor(int __user *uaddr, int n) +static inline struct __get_user futex_xor(u32 __user *uaddr, int n) { struct __get_user asm_ret = __get_user_4(uaddr); if (!asm_ret.err) { @@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n) } #endif -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { struct __get_user asm_ret; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; asm_ret = futex_cmpxchg(uaddr, oldval, newval); - return asm_ret.err ? asm_ret.err : asm_ret.val; + *uval = asm_ret.val; + return asm_ret.err; } #ifndef __tilegx__ diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 25fdc0c1839a..c6ce378e0678 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S @@ -63,7 +63,7 @@ SECTIONS *(.init.page) } :data =0 INIT_DATA_SECTION(16) - PERCPU(PAGE_SIZE) + PERCPU(L2_CACHE_BYTES, PAGE_SIZE) . 
= ALIGN(PAGE_SIZE); VMLINUX_SYMBOL(_einitdata) = .; diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index e351e14b4339..1e78940218c0 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -7,6 +7,7 @@ config UML bool default y select HAVE_GENERIC_HARDIRQS + select GENERIC_HARDIRQS_NO_DEPRECATED config MMU bool diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 5ee328099c63..02fb017fed47 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 @@ -10,6 +10,8 @@ endmenu config UML_X86 def_bool y + select GENERIC_FIND_FIRST_BIT + select GENERIC_FIND_NEXT_BIT config 64BIT bool @@ -19,6 +21,9 @@ config X86_32 def_bool !64BIT select HAVE_AOUT +config X86_64 + def_bool 64BIT + config RWSEM_XCHGADD_ALGORITHM def_bool X86_XADD diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 975613b23dcf..c70e047eed72 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -124,35 +124,18 @@ void mconsole_log(struct mc_request *req) #if 0 void mconsole_proc(struct mc_request *req) { - struct nameidata nd; struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt; struct file *file; - int n, err; + int n; char *ptr = req->request.data, *buf; mm_segment_t old_fs = get_fs(); ptr += strlen("proc"); ptr = skip_spaces(ptr); - err = vfs_path_lookup(mnt->mnt_root, mnt, ptr, LOOKUP_FOLLOW, &nd); - if (err) { - mconsole_reply(req, "Failed to look up file", 1, 0); - goto out; - } - - err = may_open(&nd.path, MAY_READ, O_RDONLY); - if (result) { - mconsole_reply(req, "Failed to open file", 1, 0); - path_put(&nd.path); - goto out; - } - - file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY, - current_cred()); - err = PTR_ERR(file); + file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY); if (IS_ERR(file)) { mconsole_reply(req, "Failed to open file", 1, 0); - path_put(&nd.path); goto out; } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index ba4a98ba39c0..620f5b70957d 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -185,7 +185,7 @@ struct ubd { .no_cow = 0, \ .shared = 0, \ .cow = DEFAULT_COW, \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \ .request = NULL, \ .start_sg = 0, \ .end_sg = 0, \ diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index ac55b9efa1ce..34bede8aad4a 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -42,7 +42,7 @@ INIT_SETUP(0) } - PERCPU(32) + PERCPU(32, 32) .initcall.init : { INIT_CALLS diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 3f0ac9e0c966..64cfea80cfe2 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -35,8 +35,10 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; + struct irq_desc *desc = irq_to_desc(i); + + raw_spin_lock_irqsave(&desc->lock, flags); + action = desc->action; if (!action) goto skip; seq_printf(p, "%3d: ",i); @@ -46,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->name); + seq_printf(p, " %14s", get_irq_desc_chip(desc)->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -54,7 +56,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - raw_spin_unlock_irqrestore(&irq_desc[i].lock, 
flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } else if (i == NR_IRQS) seq_putc(p, '\n'); @@ -360,10 +362,10 @@ EXPORT_SYMBOL(um_request_irq); EXPORT_SYMBOL(reactivate_fd); /* - * irq_chip must define (startup || enable) && - * (shutdown || disable) && end + * irq_chip must define at least enable/disable and ack when + * the edge handler is used. */ -static void dummy(unsigned int irq) +static void dummy(struct irq_data *d) { } @@ -371,20 +373,17 @@ static void dummy(unsigned int irq) static struct irq_chip normal_irq_type = { .name = "SIGIO", .release = free_irq_by_irq_and_dev, - .disable = dummy, - .enable = dummy, - .ack = dummy, - .end = dummy + .irq_disable = dummy, + .irq_enable = dummy, + .irq_ack = dummy, }; static struct irq_chip SIGVTALRM_irq_type = { .name = "SIGVTALRM", .release = free_irq_by_irq_and_dev, - .shutdown = dummy, /* never called */ - .disable = dummy, - .enable = dummy, - .ack = dummy, - .end = dummy + .irq_disable = dummy, + .irq_enable = dummy, + .irq_ack = dummy, }; void __init init_IRQ(void) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d5ed94d30aad..e1f65c46bc93 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -64,8 +64,12 @@ config X86 select HAVE_TEXT_POKE_SMP select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ + select GENERIC_FIND_FIRST_BIT + select GENERIC_FIND_NEXT_BIT select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP + select GENERIC_IRQ_SHOW + select IRQ_FORCED_THREADING select USE_GENERIC_SMP_HELPERS if SMP config INSTRUCTION_DECODER @@ -217,10 +221,6 @@ config X86_HT def_bool y depends on SMP -config X86_TRAMPOLINE - def_bool y - depends on SMP || (64BIT && ACPI_SLEEP) - config X86_32_LAZY_GS def_bool y depends on X86_32 && !CC_STACKPROTECTOR @@ -382,6 +382,8 @@ config X86_INTEL_CE depends on X86_32 depends on X86_EXTENDED_PLATFORM select X86_REBOOTFIXUPS + select OF + select OF_EARLY_FLATTREE ---help--- Select for the Intel CE media processor (CE4100) SOC. This option compiles in support for the CE4100 SOC for settop @@ -811,7 +813,7 @@ config X86_LOCAL_APIC config X86_IO_APIC def_bool y - depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC + depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC config X86_VISWS_APIC def_bool y @@ -1705,7 +1707,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID depends on NUMA config USE_PERCPU_NUMA_NODE_ID - def_bool X86_64 + def_bool y depends on NUMA menu "Power management and ACPI options" @@ -2066,9 +2068,10 @@ config SCx200HR_TIMER config OLPC bool "One Laptop Per Child support" + depends on !X86_PAE select GPIOLIB - select OLPC_OPENFIRMWARE - depends on !X86_64 && !X86_PAE + select OF + select OF_PROMTREE if PROC_DEVICETREE ---help--- Add support for detecting the unique features of the OLPC XO hardware. @@ -2079,21 +2082,6 @@ config OLPC_XO1 ---help--- Add support for non-essential features of the OLPC XO-1 laptop. -config OLPC_OPENFIRMWARE - bool "Support for OLPC's Open Firmware" - depends on !X86_64 && !X86_PAE - default n - select OF - help - This option adds support for the implementation of Open Firmware - that is used on the OLPC XO-1 Children's Machine. - If unsure, say N here. 
- -config OLPC_OPENFIRMWARE_DT - bool - default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE - select OF_PROMTREE - endif # X86_32 config AMD_NB @@ -2138,6 +2126,11 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC +config KEYS_COMPAT + bool + depends on COMPAT && KEYS + default y + endmenu diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 283c5a6a03a6..ed47e6e1747f 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -294,11 +294,6 @@ config X86_GENERIC endif -config X86_CPU - def_bool y - select GENERIC_FIND_FIRST_BIT - select GENERIC_FIND_NEXT_BIT - # # Define implied options from the CPU selection here config X86_INTERNODE_CACHE_SHIFT diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 646aa78ba5fd..46a823882437 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -62,7 +62,12 @@ int main(int argc, char *argv[]) if (fseek(f, -4L, SEEK_END)) { perror(argv[1]); } - fread(&olen, sizeof olen, 1, f); + + if (fread(&olen, sizeof(olen), 1, f) != 1) { + perror(argv[1]); + return 1; + } + ilen = ftell(f); olen = getle32(&olen); fclose(f); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index e1e60c7d5813..e0e6340c8dad 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -873,22 +873,18 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) crypto_ablkcipher_clear_flags(ctr_tfm, ~0); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); - if (ret) { - crypto_free_ablkcipher(ctr_tfm); - return ret; - } + if (ret) + goto out_free_ablkcipher; + ret = -ENOMEM; req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); - if (!req) { - crypto_free_ablkcipher(ctr_tfm); - return -EINVAL; - } + if (!req) + goto out_free_ablkcipher; req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); - if (!req_data) { - crypto_free_ablkcipher(ctr_tfm); - return -ENOMEM; - } + if (!req_data) + goto out_free_request; + memset(req_data->iv, 0, sizeof(req_data->iv)); /* Clear the data in the hash sub key container to zero.*/ @@ -913,8 +909,10 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) if (!ret) ret = req_data->result.err; } - ablkcipher_request_free(req); kfree(req_data); +out_free_request: + ablkcipher_request_free(req); +out_free_ablkcipher: crypto_free_ablkcipher(ctr_tfm); return ret; } diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 518bb99c3394..430312ba6e3f 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -25,6 +25,8 @@ #define sysretl_audit ia32_ret_from_sys_call #endif + .section .entry.text, "ax" + #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) .macro IA32_ARG_FIXUP noebp=0 @@ -126,26 +128,20 @@ ENTRY(ia32_sysenter_target) */ ENABLE_INTERRUPTS(CLBR_NONE) movl %ebp,%ebp /* zero extension */ - pushq $__USER32_DS - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $__USER32_DS /*CFI_REL_OFFSET ss,0*/ - pushq %rbp - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rbp CFI_REL_OFFSET rsp,0 - pushfq - CFI_ADJUST_CFA_OFFSET 8 + pushfq_cfi /*CFI_REL_OFFSET rflags,0*/ movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d CFI_REGISTER rip,r10 - pushq $__USER32_CS - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $__USER32_CS /*CFI_REL_OFFSET cs,0*/ movl %eax, %eax - pushq %r10 - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %r10 CFI_REL_OFFSET rip,0 - pushq %rax - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rax cld SAVE_ARGS 0,0,1 /* no need to do an access_ok check 
here because rbp has been @@ -182,11 +178,9 @@ sysexit_from_sys_call: xorq %r9,%r9 xorq %r10,%r10 xorq %r11,%r11 - popfq - CFI_ADJUST_CFA_OFFSET -8 + popfq_cfi /*CFI_RESTORE rflags*/ - popq %rcx /* User %esp */ - CFI_ADJUST_CFA_OFFSET -8 + popq_cfi %rcx /* User %esp */ CFI_REGISTER rsp,rcx TRACE_IRQS_ON ENABLE_INTERRUPTS_SYSEXIT32 @@ -421,8 +415,7 @@ ENTRY(ia32_syscall) */ ENABLE_INTERRUPTS(CLBR_NONE) movl %eax,%eax - pushq %rax - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rax cld /* note the registers are not zero extended to the sf. this could be a problem. */ @@ -851,4 +844,7 @@ ia32_sys_call_table: .quad sys_fanotify_init .quad sys32_fanotify_mark .quad sys_prlimit64 /* 340 */ + .quad sys_name_to_handle_at + .quad compat_sys_open_by_handle_at + .quad compat_sys_clock_adjtime ia32_syscall_end: diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 4ea15ca89b2b..448d73a371ba 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -29,6 +29,7 @@ #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mpspec.h> +#include <asm/trampoline.h> #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long @@ -117,7 +118,8 @@ static inline void acpi_disable_pci(void) extern int acpi_save_state_mem(void); extern void acpi_restore_state_mem(void); -extern unsigned long acpi_wakeup_address; +extern const unsigned char acpi_wakeup_code[]; +#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code))) /* early initialization routine */ extern void acpi_reserve_wakeup_memory(void); @@ -186,15 +188,7 @@ struct bootnode; #ifdef CONFIG_ACPI_NUMA extern int acpi_numa; -extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start, - unsigned long end); -extern int acpi_scan_nodes(unsigned long start, unsigned long end); -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) - -#ifdef CONFIG_NUMA_EMU -extern void acpi_fake_nodes(const struct bootnode *fake_nodes, - int num_nodes); -#endif +extern int x86_acpi_numa_init(void); #endif /* CONFIG_ACPI_NUMA */ #define acpi_unlazy_tlb(x) leave_mm(x) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 64dc82ee19f0..331682231bb4 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range { u8 dev_limit; }; -extern struct pci_device_id amd_nb_misc_ids[]; +extern const struct pci_device_id amd_nb_misc_ids[]; extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; struct bootnode; -extern int early_is_amd_nb(u32 value); +extern bool early_is_amd_nb(u32 value); extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); -extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); -extern int amd_scan_nodes(void); - -#ifdef CONFIG_NUMA_EMU -extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes); -extern void amd_get_nodes(struct bootnode *nodes); -#endif +extern int amd_numa_init(void); +extern int amd_get_subcaches(int); +extern int amd_set_subcaches(int, int); struct amd_northbridge { struct pci_dev *misc; + struct pci_dev *link; }; struct amd_northbridge_info { @@ -35,17 +32,18 @@ struct amd_northbridge_info { }; extern struct amd_northbridge_info amd_northbridges; -#define AMD_NB_GART 0x1 -#define AMD_NB_L3_INDEX_DISABLE 0x2 +#define AMD_NB_GART BIT(0) +#define AMD_NB_L3_INDEX_DISABLE BIT(1) +#define AMD_NB_L3_PARTITIONING BIT(2) #ifdef CONFIG_AMD_NB -static inline int amd_nb_num(void) +static inline u16 amd_nb_num(void) { return 
amd_northbridges.num; } -static inline int amd_nb_has_feature(int feature) +static inline bool amd_nb_has_feature(unsigned feature) { return ((amd_northbridges.flags & feature) == feature); } diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 3c896946f4cc..a279d98ea95e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -220,7 +220,6 @@ extern void enable_IR_x2apic(void); extern int get_physical_broadcast(void); -extern void apic_disable(void); extern int lapic_get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC(void); @@ -228,7 +227,6 @@ extern void disconnect_bsp_APIC(int virt_wire_setup); extern void disable_local_APIC(void); extern void lapic_shutdown(void); extern int verify_local_APIC(void); -extern void cache_APIC_registers(void); extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); extern void setup_local_APIC(void); @@ -239,8 +237,7 @@ void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); extern int APIC_init_uniprocessor(void); -extern void enable_NMI_through_LVT0(void); -extern int apic_force_enable(void); +extern int apic_force_enable(unsigned long addr); /* * On 32bit this is mach-xxx local @@ -261,7 +258,6 @@ static inline void lapic_shutdown(void) { } #define local_apic_timer_c2_ok 1 static inline void init_apic_mappings(void) { } static inline void disable_local_APIC(void) { } -static inline void apic_disable(void) { } # define setup_boot_APIC_clock x86_init_noop # define setup_secondary_APIC_clock x86_init_noop #endif /* !CONFIG_X86_LOCAL_APIC */ @@ -307,8 +303,6 @@ struct apic { void (*setup_apic_routing)(void); int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); void (*setup_portio_remap)(void); @@ -356,6 +350,23 @@ struct apic { void (*icr_write)(u32 low, u32 high); void (*wait_icr_idle)(void); u32 (*safe_wait_icr_idle)(void); + +#ifdef CONFIG_X86_32 + /* + * Called very early during boot from get_smp_config(). It should + * return the logical apicid. x86_[bios]_cpu_to_apicid is + * initialized before this function is called. + * + * If logical apicid can't be determined that early, the function + * may return BAD_APICID. Logical apicid will be configured after + * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity + * won't be applied properly during early boot in this case. + */ + int (*x86_32_early_logical_apicid)(int cpu); + + /* determine CPU -> NUMA node mapping */ + int (*x86_32_numa_cpu_node)(int cpu); +#endif }; /* @@ -503,6 +514,11 @@ extern struct apic apic_noop; extern struct apic apic_default; +static inline int noop_x86_32_early_logical_apicid(int cpu) +{ + return BAD_APICID; +} + /* * Set up the logical destination ID. 
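
The x86_32_early_logical_apicid hook added to struct apic above is deliberately optional: per the comment, it may return BAD_APICID, and the logical apicid is then configured later via init_apic_ldr() during CPU bring-up (at the cost of early NUMA affinity). A minimal user-space sketch of that optional-callback-with-sentinel pattern follows; the struct and function names are invented for illustration, and only the BAD_APICID convention and the 1 << cpu flat mapping come from this patch:

#include <stdio.h>

#define BAD_APICID 0xFFFF       /* sentinel: "cannot be determined this early" */

struct apic_driver {
        const char *name;
        /* optional hook; may return BAD_APICID to defer the decision */
        int (*early_logical_apicid)(int cpu);
};

static int noop_early_logical_apicid(int cpu)
{
        (void)cpu;
        return BAD_APICID;
}

static int flat_early_logical_apicid(int cpu)
{
        return 1 << cpu;        /* one bit per CPU, like the old "1 << cpu" default */
}

static void bring_up_cpu(const struct apic_driver *drv, int cpu)
{
        int logical = drv->early_logical_apicid(cpu);

        if (logical == BAD_APICID)
                printf("%s: cpu%d: defer logical apicid until init_apic_ldr()\n",
                       drv->name, cpu);
        else
                printf("%s: cpu%d: early logical apicid 0x%x\n",
                       drv->name, cpu, logical);
}

int main(void)
{
        struct apic_driver noop = { "noop", noop_early_logical_apicid };
        struct apic_driver flat = { "flat", flat_early_logical_apicid };

        bring_up_cpu(&noop, 0);
        bring_up_cpu(&flat, 2);
        return 0;
}
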
* @@ -522,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -extern int default_apicid_to_node(int logical_apicid); +extern int default_x86_32_numa_cpu_node(int cpu); #endif @@ -558,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma *retmap = *phys_map; } -/* Mapping from cpu number to logical apicid */ -static inline int default_cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - static inline int __default_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) @@ -596,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid); #endif /* CONFIG_X86_LOCAL_APIC */ -#ifdef CONFIG_X86_32 -extern u8 cpu_2_logical_apicid[NR_CPUS]; -#endif - #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 47a30ff8e517..d87988bacf3e 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -426,4 +426,16 @@ struct local_apic { #else #define BAD_APICID 0xFFFFu #endif + +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; + #endif /* _ASM_X86_APICDEF_H */ diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h index c8bfe63a06de..e020d88ec02d 100644 --- a/arch/x86/include/asm/bootparam.h +++ b/arch/x86/include/asm/bootparam.h @@ -12,6 +12,7 @@ /* setup data types */ #define SETUP_NONE 0 #define SETUP_E820_EXT 1 +#define SETUP_DTB 2 /* extensible setup data list node */ struct setup_data { diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h new file mode 100644 index 000000000000..e656ad8c0a2e --- /dev/null +++ b/arch/x86/include/asm/ce4100.h @@ -0,0 +1,6 @@ +#ifndef _ASM_CE4100_H_ +#define _ASM_CE4100_H_ + +int ce4100_pci_init(void); + +#endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 220e2ea08e80..91f3e087cf21 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -160,6 +160,7 @@ #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) +#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index e99d55d74df5..908b96957d88 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -96,7 +96,7 @@ extern void e820_setup_gap(void); extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, unsigned long start_addr, unsigned long long end_addr); struct setup_data; -extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data); +extern void parse_e820_ext(struct setup_data *data); #if defined(CONFIG_X86_64) || \ (defined(CONFIG_X86_32) && 
defined(CONFIG_HIBERNATION)) diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 57650ab4a5f5..1cd6d26a0a8d 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) -.irpc idx, "01234567" +.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +.if NUM_INVALIDATE_TLB_VECTORS > \idx BUILD_INTERRUPT3(invalidate_interrupt\idx, (INVALIDATE_TLB_VECTOR_START)+\idx, smp_invalidate_interrupt) +.endif .endr #endif diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h index 06850a7194e1..2c6fc9e62812 100644 --- a/arch/x86/include/asm/frame.h +++ b/arch/x86/include/asm/frame.h @@ -7,14 +7,12 @@ frame pointer later */ #ifdef CONFIG_FRAME_POINTER .macro FRAME - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebp CFI_REL_OFFSET ebp,0 movl %esp,%ebp .endm .macro ENDFRAME - popl %ebp - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebp CFI_RESTORE ebp .endm #else diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index 1f11ce44e956..d09bb03653f0 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -37,7 +37,7 @@ "+m" (*uaddr), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) @@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { + int ret = 0; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) /* Real i386 machines have no cmpxchg instruction */ @@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, return -ENOSYS; #endif - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" + asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" "2:\t.section .fixup, \"ax\"\n" - "3:\tmov %2, %0\n" + "3:\tmov %3, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 3b) - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "+r" (ret), "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "1" (oldval) : "memory" ); - return oldval; + *uval = oldval; + return ret; } #endif diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 0274ec5a7e62..bb9efe8706e2 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void); extern void invalidate_interrupt5(void); extern void invalidate_interrupt6(void); extern void invalidate_interrupt7(void); 
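
The futex.h hunk above changes futex_atomic_cmpxchg_inatomic() so that the fault status and the old value travel separately: the function now returns 0 or -EFAULT and stores the previous word through the new *uval argument instead of overloading the return value. A rough user-space analogue of that calling convention is sketched below; this is not the kernel code, and a GCC builtin stands in for the lock-prefixed cmpxchgl and its exception fixup:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in: report success or -EFAULT through the return value
 * and pass the old word back via *uval. The fault path here is only a
 * NULL-pointer stand-in for the kernel's access_ok()/fixup handling.
 */
static int cmpxchg_user_u32(uint32_t *uval, uint32_t *uaddr,
                            uint32_t oldval, uint32_t newval)
{
        if (uaddr == NULL)
                return -EFAULT;

        uint32_t prev = oldval;
        __atomic_compare_exchange_n(uaddr, &prev, newval, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        *uval = prev;           /* old value, whether or not the swap happened */
        return 0;
}

int main(void)
{
        uint32_t futex_word = 1, old;
        int ret = cmpxchg_user_u32(&old, &futex_word, 1, 2);

        printf("ret=%d old=%u word=%u\n", ret, old, futex_word);  /* 0 1 2 */
        return 0;
}
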
+extern void invalidate_interrupt8(void); +extern void invalidate_interrupt9(void); +extern void invalidate_interrupt10(void); +extern void invalidate_interrupt11(void); +extern void invalidate_interrupt12(void); +extern void invalidate_interrupt13(void); +extern void invalidate_interrupt14(void); +extern void invalidate_interrupt15(void); +extern void invalidate_interrupt16(void); +extern void invalidate_interrupt17(void); +extern void invalidate_interrupt18(void); +extern void invalidate_interrupt19(void); +extern void invalidate_interrupt20(void); +extern void invalidate_interrupt21(void); +extern void invalidate_interrupt22(void); +extern void invalidate_interrupt23(void); +extern void invalidate_interrupt24(void); +extern void invalidate_interrupt25(void); +extern void invalidate_interrupt26(void); +extern void invalidate_interrupt27(void); +extern void invalidate_interrupt28(void); +extern void invalidate_interrupt29(void); +extern void invalidate_interrupt30(void); +extern void invalidate_interrupt31(void); extern void irq_move_cleanup_interrupt(void); extern void reboot_interrupt(void); diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h index 36fb1a6a5109..8dbe353e41e1 100644 --- a/arch/x86/include/asm/init.h +++ b/arch/x86/include/asm/init.h @@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start, unsigned long page_size_mask); -extern unsigned long __initdata e820_table_start; -extern unsigned long __meminitdata e820_table_end; -extern unsigned long __meminitdata e820_table_top; +extern unsigned long __initdata pgt_buf_start; +extern unsigned long __meminitdata pgt_buf_end; +extern unsigned long __meminitdata pgt_buf_top; #endif /* _ASM_X86_INIT_32_H */ diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index f327d386d6cc..c4bd267dfc50 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -63,17 +63,6 @@ union IO_APIC_reg_03 { } __attribute__ ((packed)) bits; }; -enum ioapic_irq_destination_types { - dest_Fixed = 0, - dest_LowestPrio = 1, - dest_SMI = 2, - dest__reserved_1 = 3, - dest_NMI = 4, - dest_INIT = 5, - dest__reserved_2 = 6, - dest_ExtINT = 7 -}; - struct IO_APIC_route_entry { __u32 vector : 8, delivery_mode : 3, /* 000: FIXED @@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry { index : 15; } __attribute__ ((packed)); +#define IOAPIC_AUTO -1 +#define IOAPIC_EDGE 0 +#define IOAPIC_LEVEL 1 + #ifdef CONFIG_X86_IO_APIC /* @@ -150,11 +143,6 @@ extern int timer_through_8259; #define io_apic_assign_pci_irqs \ (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) -extern u8 io_apic_unique_id(u8 id); -extern int io_apic_get_unique_id(int ioapic, int apic_id); -extern int io_apic_get_version(int ioapic); -extern int io_apic_get_redir_entries(int ioapic); - struct io_apic_irq_attr; extern int io_apic_set_pci_routing(struct device *dev, int irq, struct io_apic_irq_attr *irq_attr); @@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi); extern void ioapic_and_gsi_init(void); extern void ioapic_insert_resources(void); +int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); + extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); @@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void); extern void mp_save_irq(struct mpc_intsrc *m); +extern void disable_ioapic_support(void); + #else /* 
!CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 @@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; } struct io_apic_irq_attr; static inline int io_apic_set_pci_routing(struct device *dev, int irq, struct io_apic_irq_attr *irq_attr) { return 0; } + +static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void) +{ + return NULL; +} + +static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { } +static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent) +{ + return -ENOMEM; +} + +static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { } +static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent) +{ + return -ENOMEM; +} + +static inline void mp_save_irq(struct mpc_intsrc *m) { }; +static inline void disable_ioapic_support(void) { } #endif #endif /* _ASM_X86_IO_APIC_H */ diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 0b7228268a63..615fa9061b57 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector); extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); -extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, - int vector); -extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, - int vector); /* Avoid include hell */ #define NMI_VECTOR 0x02 @@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector) } #ifdef CONFIG_X86_32 +extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector); extern void default_send_IPI_mask_logical(const struct cpumask *mask, int vector); extern void default_send_IPI_allbutself(int vector); diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index c704b38c57a2..ba870bb6dd8e 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -10,9 +10,6 @@ #include <asm/apicdef.h> #include <asm/irq_vectors.h> -/* Even though we don't support this, supply it to appease OF */ -static inline void irq_dispose_mapping(unsigned int virq) { } - static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h new file mode 100644 index 000000000000..423bbbddf36d --- /dev/null +++ b/arch/x86/include/asm/irq_controller.h @@ -0,0 +1,12 @@ +#ifndef __IRQ_CONTROLLER__ +#define __IRQ_CONTROLLER__ + +struct irq_domain { + int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize, + u32 *out_hwirq, u32 *out_type); + void *priv; + struct device_node *controller; + struct list_head l; +}; + +#endif diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 6af0894dafb4..6e976ee3b3ef 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_IRQ_VECTORS_H #define _ASM_X86_IRQ_VECTORS_H +#include <linux/threads.h> /* * Linux IRQ vector layout. * @@ -16,8 +17,8 @@ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events * Vectors 32 ... 127 : device interrupts * Vector 128 : legacy int80 syscall interface - * Vectors 129 ... 237 : device interrupts - * Vectors 238 ... 255 : special interrupts + * Vectors 129 ... 
INVALIDATE_TLB_VECTOR_START-1 : device interrupts + * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts * * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. * @@ -96,37 +97,43 @@ #define THRESHOLD_APIC_VECTOR 0xf9 #define REBOOT_VECTOR 0xf8 -/* f0-f7 used for spreading out TLB flushes: */ -#define INVALIDATE_TLB_VECTOR_END 0xf7 -#define INVALIDATE_TLB_VECTOR_START 0xf0 -#define NUM_INVALIDATE_TLB_VECTORS 8 - -/* - * Local APIC timer IRQ vector is on a different priority level, - * to work around the 'lost local interrupt if more than 2 IRQ - * sources per level' errata. - */ -#define LOCAL_TIMER_VECTOR 0xef - /* * Generic system vector for platform specific use */ -#define X86_PLATFORM_IPI_VECTOR 0xed +#define X86_PLATFORM_IPI_VECTOR 0xf7 /* * IRQ work vector: */ -#define IRQ_WORK_VECTOR 0xec +#define IRQ_WORK_VECTOR 0xf6 -#define UV_BAU_MESSAGE 0xea +#define UV_BAU_MESSAGE 0xf5 /* * Self IPI vector for machine checks */ -#define MCE_SELF_VECTOR 0xeb +#define MCE_SELF_VECTOR 0xf4 /* Xen vector callback to receive events in a HVM domain */ -#define XEN_HVM_EVTCHN_CALLBACK 0xe9 +#define XEN_HVM_EVTCHN_CALLBACK 0xf3 + +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. + */ +#define LOCAL_TIMER_VECTOR 0xef + +/* up to 32 vectors used for spreading out TLB flushes: */ +#if NR_CPUS <= 32 +# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS) +#else +# define NUM_INVALIDATE_TLB_VECTORS (32) +#endif + +#define INVALIDATE_TLB_VECTOR_END (0xee) +#define INVALIDATE_TLB_VECTOR_START \ + (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1) #define NR_VECTORS 256 diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index ca242d35e873..518bbbb9ee59 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -13,7 +13,6 @@ enum die_val { DIE_PANIC, DIE_NMI, DIE_DIE, - DIE_NMIWATCHDOG, DIE_KERNELDEBUG, DIE_TRAP, DIE_GPF, diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 0c90dd9f0505..9c7d95f6174b 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -25,7 +25,6 @@ extern int pic_mode; #define MAX_IRQ_SOURCES 256 extern unsigned int def_to_bigsmp; -extern u8 apicid_2_node[]; #ifdef CONFIG_X86_NUMAQ extern int mp_bus_id_to_node[MAX_MP_BUSSES]; @@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES]; extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #endif -#define MAX_APICID 256 - #else /* CONFIG_X86_64: */ #define MAX_MP_BUSSES 256 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 43a18c77676d..823d48223400 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -52,6 +52,9 @@ #define MSR_IA32_MCG_STATUS 0x0000017a #define MSR_IA32_MCG_CTL 0x0000017b +#define MSR_OFFCORE_RSP_0 0x000001a6 +#define MSR_OFFCORE_RSP_1 0x000001a7 + #define MSR_IA32_PEBS_ENABLE 0x000003f1 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c76f5b92b840..07f46016d3ff 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -7,7 +7,6 @@ #ifdef CONFIG_X86_LOCAL_APIC -extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); extern int reserve_perfctr_nmi(unsigned int); extern void release_perfctr_nmi(unsigned int); diff --git 
a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 27da400d3138..3d4dab43c994 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -1,5 +1,57 @@ +#ifndef _ASM_X86_NUMA_H +#define _ASM_X86_NUMA_H + +#include <asm/topology.h> +#include <asm/apicdef.h> + +#ifdef CONFIG_NUMA + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) + +/* + * __apicid_to_node[] stores the raw mapping between physical apicid and + * node and is used to initialize cpu_to_node mapping. + * + * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus + * should be accessed by the accessors - set_apicid_to_node() and + * numa_cpu_node(). + */ +extern s16 __apicid_to_node[MAX_LOCAL_APIC]; + +static inline void set_apicid_to_node(int apicid, s16 node) +{ + __apicid_to_node[apicid] = node; +} +#else /* CONFIG_NUMA */ +static inline void set_apicid_to_node(int apicid, s16 node) +{ +} +#endif /* CONFIG_NUMA */ + #ifdef CONFIG_X86_32 # include "numa_32.h" #else # include "numa_64.h" #endif + +#ifdef CONFIG_NUMA +extern void __cpuinit numa_set_node(int cpu, int node); +extern void __cpuinit numa_clear_node(int cpu); +extern void __init numa_init_array(void); +extern void __init init_cpu_to_node(void); +extern void __cpuinit numa_add_cpu(int cpu); +extern void __cpuinit numa_remove_cpu(int cpu); +#else /* CONFIG_NUMA */ +static inline void numa_set_node(int cpu, int node) { } +static inline void numa_clear_node(int cpu) { } +static inline void numa_init_array(void) { } +static inline void init_cpu_to_node(void) { } +static inline void numa_add_cpu(int cpu) { } +static inline void numa_remove_cpu(int cpu) { } +#endif /* CONFIG_NUMA */ + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); +#endif + +#endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index b0ef2b449a9d..c6beed1ef103 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h @@ -4,7 +4,12 @@ extern int numa_off; extern int pxm_to_nid(int pxm); -extern void numa_remove_cpu(int cpu); + +#ifdef CONFIG_NUMA +extern int __cpuinit numa_cpu_node(int cpu); +#else /* CONFIG_NUMA */ +static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } +#endif /* CONFIG_NUMA */ #ifdef CONFIG_HIGHMEM extern void set_highmem_pages_init(void); diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 0493be39607c..344eb1790b46 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h @@ -2,23 +2,16 @@ #define _ASM_X86_NUMA_64_H #include <linux/nodemask.h> -#include <asm/apicdef.h> struct bootnode { u64 start; u64 end; }; -extern int compute_hash_shift(struct bootnode *nodes, int numblks, - int *nodeids); - #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) -extern void numa_init_array(void); extern int numa_off; -extern s16 apicid_to_node[MAX_LOCAL_APIC]; - extern unsigned long numa_free_all_bootmem(void); extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); @@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, */ #define NODE_MIN_SIZE (4*1024*1024) -extern void __init init_cpu_to_node(void); -extern void __cpuinit numa_set_node(int cpu, int node); -extern void __cpuinit numa_clear_node(int cpu); -extern void __cpuinit numa_add_cpu(int cpu); -extern void __cpuinit numa_remove_cpu(int cpu); +extern nodemask_t numa_nodes_parsed __initdata; + +extern int __cpuinit numa_cpu_node(int cpu); +extern int 
__init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void __init numa_set_distance(int from, int to, int distance); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) @@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu); void numa_emu_cmdline(char *); #endif /* CONFIG_NUMA_EMU */ #else -static inline void init_cpu_to_node(void) { } -static inline void numa_set_node(int cpu, int node) { } -static inline void numa_clear_node(int cpu) { } -static inline void numa_add_cpu(int cpu, int node) { } -static inline void numa_remove_cpu(int cpu) { } +static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } #endif #endif /* _ASM_X86_NUMA_64_H */ diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 641988efe063..c5d3a5abbb9f 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h @@ -6,7 +6,7 @@ #define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */ -#ifdef CONFIG_OLPC_OPENFIRMWARE +#ifdef CONFIG_OLPC extern bool olpc_ofw_is_installed(void); @@ -26,19 +26,15 @@ extern void setup_olpc_ofw_pgd(void); /* check if OFW was detected during boot */ extern bool olpc_ofw_present(void); -#else /* !CONFIG_OLPC_OPENFIRMWARE */ - -static inline bool olpc_ofw_is_installed(void) { return false; } +#else /* !CONFIG_OLPC */ static inline void olpc_ofw_detect(void) { } static inline void setup_olpc_ofw_pgd(void) { } -static inline bool olpc_ofw_present(void) { return false; } - -#endif /* !CONFIG_OLPC_OPENFIRMWARE */ +#endif /* !CONFIG_OLPC */ -#ifdef CONFIG_OLPC_OPENFIRMWARE_DT +#ifdef CONFIG_OF_PROMTREE extern void olpc_dt_build_devicetree(void); #else static inline void olpc_dt_build_devicetree(void) { } -#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */ +#endif #endif /* _ASM_X86_OLPC_OFW_H */ diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 1df66211fd1b..bce688d54c12 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -2,6 +2,7 @@ #define _ASM_X86_PAGE_DEFS_H #include <linux/const.h> +#include <linux/types.h> /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 @@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; +static inline phys_addr_t get_max_mapped(void) +{ + return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; +} + extern unsigned long init_memory_mapping(unsigned long start, unsigned long end); -extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8); +extern void initmem_init(void); extern void free_initmem(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 7e172955ee57..a09e1f052d84 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -451,6 +451,26 @@ do { \ #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) #endif /* !CONFIG_M386 */ +#ifdef CONFIG_X86_CMPXCHG64 +#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \ +({ \ + char __ret; \ + typeof(o1) __o1 = o1; \ + typeof(o1) __n1 = n1; \ + typeof(o2) __o2 = o2; \ + typeof(o2) __n2 = n2; \ + typeof(o2) __dummy = n2; \ + asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \ + : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \ + : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \ + __ret; \ +}) + +#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) +#define 
this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) +#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) +#endif /* CONFIG_X86_CMPXCHG64 */ + /* * Per cpu atomic 64 bit operations are only available under 64 bit. * 32 bit must fall back to generic operations. @@ -480,6 +500,34 @@ do { \ #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) + +/* + * Pretty complex macro to generate cmpxchg16 instruction. The instruction + * is not supported on early AMD64 processors so we must be able to emulate + * it in software. The address used in the cmpxchg16 instruction must be + * aligned to a 16 byte boundary. + */ +#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \ +({ \ + char __ret; \ + typeof(o1) __o1 = o1; \ + typeof(o1) __n1 = n1; \ + typeof(o2) __o2 = o2; \ + typeof(o2) __n2 = n2; \ + typeof(o2) __dummy; \ + alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \ + "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t", \ + X86_FEATURE_CX16, \ + ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ + "S" (&pcp1), "b"(__n1), "c"(__n2), \ + "a"(__o1), "d"(__o2)); \ + __ret; \ +}) + +#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) +#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) + #endif /* This is not atomic against other CPUs -- CPU preemption needs to be off */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 45636cefa186..4c25ab48257b 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -94,10 +94,6 @@ struct cpuinfo_x86 { int x86_cache_alignment; /* In bytes */ int x86_power; unsigned long loops_per_jiffy; -#ifdef CONFIG_SMP - /* cpus sharing the last level cache: */ - cpumask_var_t llc_shared_map; -#endif /* cpuid returned max cores value: */ u16 x86_max_cores; u16 apicid; diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index b4ec95f07518..971e0b46446e 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -1 +1,69 @@ -/* dummy prom.h; here to make linux/of.h's #includes happy */ +/* + * Definitions for Device tree / OpenFirmware handling on X86 + * + * based on arch/powerpc/include/asm/prom.h which is + * Copyright (C) 1996-2005 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_X86_PROM_H +#define _ASM_X86_PROM_H +#ifndef __ASSEMBLY__ + +#include <linux/of.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include <asm/irq.h> +#include <asm/atomic.h> +#include <asm/setup.h> +#include <asm/irq_controller.h> + +#ifdef CONFIG_OF +extern int of_ioapic; +extern u64 initial_dtb; +extern void add_dtb(u64 data); +extern void x86_add_irq_domains(void); +void __cpuinit x86_of_pci_init(void); +void x86_dtb_init(void); + +static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) +{ + return pdev ? 
pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return pci_device_to_OF_node(bus->self); +} + +#else +static inline void add_dtb(u64 data) { } +static inline void x86_add_irq_domains(void) { } +static inline void x86_of_pci_init(void) { } +static inline void x86_dtb_init(void) { } +#define of_ioapic 0 +#endif + +extern char cmd_line[COMMAND_LINE_SIZE]; + +#define pci_address_to_pio pci_address_to_pio +unsigned long pci_address_to_pio(phys_addr_t addr); + +/** + * irq_dispose_mapping - Unmap an interrupt + * @virq: linux virq number of the interrupt to unmap + * + * FIXME: We really should implement proper virq handling like power, + * but that's going to be major surgery. + */ +static inline void irq_dispose_mapping(unsigned int virq) { } + +#define HAVE_ARCH_DEVTREE_FIXUPS + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 562d4fd31ba8..3250e3d605d9 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -18,7 +18,10 @@ extern struct machine_ops machine_ops; void native_machine_crash_shutdown(struct pt_regs *regs); void native_machine_shutdown(void); -void machine_real_restart(const unsigned char *code, int length); +void machine_real_restart(unsigned int type); +/* These must match dispatch_table in reboot_32.S */ +#define MRR_BIOS 0 +#define MRR_APM 1 typedef void (*nmi_shootdown_cb)(int, struct die_args*); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index d1e41b0f9b60..df4cd32b4cc6 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -37,26 +37,9 @@ #endif #ifdef __KERNEL__ - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/lockdep.h> #include <asm/asm.h> -struct rwsem_waiter; - -extern asmregparm struct rw_semaphore * - rwsem_down_read_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_down_write_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_wake(struct rw_semaphore *); -extern asmregparm struct rw_semaphore * - rwsem_downgrade_wake(struct rw_semaphore *sem); - /* - * the semaphore definition - * * The bias values and the counter type limits the number of * potential readers/writers to 32767 for 32 bits and 2147483647 * for 64 bits. 
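
The comment above notes that the bias values and the counter type cap the number of readers/writers (32767 on 32 bit), and the hunk that follows annotates the actual constants: __down_write "adds 0xffff0001", __up_read "subtracts 1". A worked sketch of how a single signed count word distinguishes the cases is shown below; this is plain C arithmetic for illustration only, the kernel does it with lock-prefixed xadd/cmpxchg, and the exact bias macros are defined outside this excerpt:

#include <stdint.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0
#define RWSEM_ACTIVE_READ_BIAS  1                     /* __up_read "subtracts 1"        */
#define RWSEM_ACTIVE_WRITE_BIAS ((int32_t)0xffff0001) /* __down_write "adds 0xffff0001" */

static void show(const char *what, int32_t count)
{
        printf("%-12s count=0x%08x  %s\n", what, (uint32_t)count,
               count > 0 ? "readers only" :
               count == 0 ? "unlocked" : "writer held or waiters queued");
}

int main(void)
{
        int32_t count = RWSEM_UNLOCKED_VALUE;

        show("unlocked", count);

        count += RWSEM_ACTIVE_READ_BIAS;        /* reader A takes the lock */
        count += RWSEM_ACTIVE_READ_BIAS;        /* reader B joins it       */
        show("two readers", count);

        count -= 2 * RWSEM_ACTIVE_READ_BIAS;    /* both readers drop it    */
        count += RWSEM_ACTIVE_WRITE_BIAS;       /* a writer takes it       */
        show("one writer", count);
        return 0;
}
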
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore * #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -typedef signed long rwsem_count_t; - -struct rw_semaphore { - rwsem_count_t count; - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - - -#define __RWSEM_INITIALIZER(name) \ -{ \ - RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ -} - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - /* * lock for reading */ @@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem) */ static inline int __down_read_trylock(struct rw_semaphore *sem) { - rwsem_count_t result, tmp; + long result, tmp; asm volatile("# beginning __down_read_trylock\n\t" " mov %0,%1\n\t" "1:\n\t" @@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) */ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { - rwsem_count_t tmp; + long tmp; asm volatile("# beginning down_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" /* adds 0xffff0001, returns the old value */ @@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem) */ static inline int __down_write_trylock(struct rw_semaphore *sem) { - rwsem_count_t ret = cmpxchg(&sem->count, - RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); + long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); if (ret == RWSEM_UNLOCKED_VALUE) return 1; return 0; @@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) */ static inline void __up_read(struct rw_semaphore *sem) { - rwsem_count_t tmp; + long tmp; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" /* subtracts 1, returns the old value */ @@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem) */ static inline void __up_write(struct rw_semaphore *sem) { - rwsem_count_t tmp; + long tmp; asm volatile("# beginning __up_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" /* subtracts 0xffff0001, returns the old value */ @@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(rwsem_count_t delta, - struct rw_semaphore *sem) +static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) { asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" : "+m" (sem->count) @@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta, /* * implement exchange and add functionality */ -static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, - struct rw_semaphore *sem) +static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) { - rwsem_count_t tmp = delta; + long tmp = delta; asm volatile(LOCK_PREFIX "xadd %0,%1" : "+r" (tmp), "+m" (sem->count) @@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, return tmp + delta; } -static inline int 
rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _ASM_X86_RWSEM_H */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 231f1c1d6607..cd84f7208f76 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -1,14 +1,16 @@ #ifndef _ASM_X86_SEGMENT_H #define _ASM_X86_SEGMENT_H +#include <linux/const.h> + /* Constructor for a conventional segment GDT (or LDT) entry */ /* This is a macro so it can be used in initializers */ #define GDT_ENTRY(flags, base, limit) \ - ((((base) & 0xff000000ULL) << (56-24)) | \ - (((flags) & 0x0000f0ffULL) << 40) | \ - (((limit) & 0x000f0000ULL) << (48-16)) | \ - (((base) & 0x00ffffffULL) << 16) | \ - (((limit) & 0x0000ffffULL))) + ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \ + (((flags) & _AC(0x0000f0ff,ULL)) << 40) | \ + (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \ + (((base) & _AC(0x00ffffff,ULL)) << 16) | \ + (((limit) & _AC(0x0000ffff,ULL)))) /* Simple and small GDT entries for booting only */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 1f4695136776..73b11bc0ae6f 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -17,12 +17,24 @@ #endif #include <asm/thread_info.h> #include <asm/cpumask.h> +#include <asm/cpufeature.h> extern int smp_num_siblings; extern unsigned int num_processors; +static inline bool cpu_has_ht_siblings(void) +{ + bool has_siblings = false; +#ifdef CONFIG_SMP + has_siblings = cpu_has_ht && smp_num_siblings > 1; +#endif + return has_siblings; +} + DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); +/* cpus sharing the last level cache: */ +DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(int, cpu_number); @@ -36,8 +48,16 @@ static inline struct cpumask *cpu_core_mask(int cpu) return per_cpu(cpu_core_map, cpu); } +static inline struct cpumask *cpu_llc_shared_mask(int cpu) +{ + return per_cpu(cpu_llc_shared_map, cpu); +} + DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) +DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid); +#endif /* Static state in head.S used to set up a CPU */ extern unsigned long stack_start; /* Initial stack pointer address */ diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 33ecc3ea8782..12569e691ce3 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -98,8 +98,6 @@ do { \ */ #define HAVE_DISABLE_HLT #else -#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" -#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" /* frame pointer must be last for get_wchan */ #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 21899cc31e52..910a7084f7f2 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -47,21 +47,6 @@ #include <asm/mpspec.h> -#ifdef CONFIG_X86_32 - -/* Mappings between logical cpu number and node number */ -extern int cpu_to_node_map[]; - -/* Returns the number of the node containing CPU 'cpu' */ -static inline int __cpu_to_node(int cpu) -{ - return cpu_to_node_map[cpu]; -} -#define early_cpu_to_node __cpu_to_node -#define cpu_to_node __cpu_to_node - -#else /* CONFIG_X86_64 */ - /* Mappings 
between logical cpu number and node number */ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); @@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu) #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -#endif /* CONFIG_X86_64 */ - /* Mappings between node number and cpus on that node. */ extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; @@ -155,7 +138,7 @@ extern unsigned long node_remap_size[]; .balance_interval = 1, \ } -#ifdef CONFIG_X86_64_ACPI_NUMA +#ifdef CONFIG_X86_64 extern int __node_distance(int, int); #define node_distance(a, b) __node_distance(a, b) #endif diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index f4500fb3b485..feca3118a73b 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h @@ -3,25 +3,36 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_X86_TRAMPOLINE +#include <linux/types.h> +#include <asm/io.h> + /* - * Trampoline 80x86 program as an array. + * Trampoline 80x86 program as an array. These are in the init rodata + * segment, but that's okay, because we only care about the relative + * addresses of the symbols. */ -extern const unsigned char trampoline_data []; -extern const unsigned char trampoline_end []; -extern unsigned char *trampoline_base; +extern const unsigned char x86_trampoline_start []; +extern const unsigned char x86_trampoline_end []; +extern unsigned char *x86_trampoline_base; extern unsigned long init_rsp; extern unsigned long initial_code; extern unsigned long initial_gs; -#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) +extern void __init setup_trampolines(void); + +extern const unsigned char trampoline_data[]; +extern const unsigned char trampoline_status[]; + +#define TRAMPOLINE_SYM(x) \ + ((void *)(x86_trampoline_base + \ + ((const unsigned char *)(x) - x86_trampoline_start))) -extern unsigned long setup_trampoline(void); -extern void __init reserve_trampoline_memory(void); -#else -static inline void reserve_trampoline_memory(void) {} -#endif /* CONFIG_X86_TRAMPOLINE */ +/* Address of the SMP trampoline */ +static inline unsigned long trampoline_address(void) +{ + return virt_to_phys(TRAMPOLINE_SYM(trampoline_data)); +} #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index b766a5e8ba0e..ffaf183c619a 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h @@ -346,10 +346,13 @@ #define __NR_fanotify_init 338 #define __NR_fanotify_mark 339 #define __NR_prlimit64 340 +#define __NR_name_to_handle_at 341 +#define __NR_open_by_handle_at 342 +#define __NR_clock_adjtime 343 #ifdef __KERNEL__ -#define NR_syscalls 341 +#define NR_syscalls 344 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 363e9b8a715b..5466bea670e7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h @@ -669,6 +669,12 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init) __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) #define __NR_prlimit64 302 __SYSCALL(__NR_prlimit64, sys_prlimit64) +#define __NR_name_to_handle_at 303 +__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) +#define __NR_open_by_handle_at 304 +__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) +#define __NR_clock_adjtime 305 +__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/arch/x86/include/asm/uv/uv_bau.h 
b/arch/x86/include/asm/uv/uv_bau.h index ce1d54c8a433..3e094af443c3 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -176,7 +176,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ + unsigned int base_dest_nodeid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 64642ad019fb..643ebf2e2ad8 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -83,11 +83,13 @@ struct x86_init_paging { * boot cpu * @tsc_pre_init: platform function called before TSC init * @timer_init: initialize the platform timer (default PIT/HPET) + * @wallclock_init: init the wallclock device */ struct x86_init_timers { void (*setup_percpu_clockev)(void); void (*tsc_pre_init)(void); void (*timer_init)(void); + void (*wallclock_init)(void); }; /** diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index a3c28ae4025b..8508bfe52296 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set) static inline int HYPERVISOR_sched_op(int cmd, void *arg) { - return _hypercall2(int, sched_op_new, cmd, arg); + return _hypercall2(int, sched_op, cmd, arg); } static inline long @@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value) #endif static inline int -HYPERVISOR_suspend(unsigned long srec) +HYPERVISOR_suspend(unsigned long start_info_mfn) { - return _hypercall3(int, sched_op, SCHEDOP_shutdown, - SHUTDOWN_suspend, srec); + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; + + /* + * For a PV guest the tools require that the start_info mfn be + * present in rdx/edx when the hypercall is made. Per the + * hypercall calling convention this is the third hypercall + * argument, which is start_info_mfn here. 
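
The rewritten HYPERVISOR_suspend() (the call itself follows just below) keeps the three-argument sched_op hypercall but moves the shutdown reason into a struct sched_shutdown, leaving the start_info mfn as the raw third argument so the tools still find it in rdx/edx. A mock sketch of that argument marshalling: no real hypercall is made, and the SCHEDOP_/SHUTDOWN_ values here are placeholders rather than the real xen/sched.h definitions:

#include <stdio.h>

#define SCHEDOP_shutdown 1      /* placeholder value */
#define SHUTDOWN_suspend 2      /* placeholder value */

struct sched_shutdown {
        unsigned int reason;
};

/* Mock of a three-argument hypercall: just show what lands in each slot. */
static int mock_hypercall3(const char *op, unsigned long a1,
                           unsigned long a2, unsigned long a3)
{
        printf("%s: arg1=%lu arg2=%p arg3=0x%lx (the rdx/edx slot)\n",
               op, a1, (void *)a2, a3);
        return 0;
}

static int mock_suspend(unsigned long start_info_mfn)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

        /* the reason now travels inside the struct; the mfn stays a raw argument */
        return mock_hypercall3("sched_op", SCHEDOP_shutdown,
                               (unsigned long)&r, start_info_mfn);
}

int main(void)
{
        return mock_suspend(0x12345);
}
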
+ */ + return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn); } static inline int diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index f25bdf238a33..c61934fbf22a 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -29,8 +29,10 @@ typedef struct xpaddr { /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) -#define FOREIGN_FRAME_BIT (1UL<<31) +#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1)) +#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2)) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) +#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT) /* Maximum amount of memory we can handle in a domain in pages */ #define MAX_DOMAIN_PAGES \ @@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order; extern unsigned long get_phys_to_machine(unsigned long pfn); extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); +extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); +extern unsigned long set_phys_range_identity(unsigned long pfn_s, + unsigned long pfn_e); extern int m2p_add_override(unsigned long mfn, struct page *page); extern int m2p_remove_override(struct page *page); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); +#ifdef CONFIG_XEN_DEBUG_FS +extern int p2m_dump_show(struct seq_file *m, void *v); +#endif static inline unsigned long pfn_to_mfn(unsigned long pfn) { unsigned long mfn; @@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) mfn = get_phys_to_machine(pfn); if (mfn != INVALID_P2M_ENTRY) - mfn &= ~FOREIGN_FRAME_BIT; + mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); return mfn; } @@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; + int ret = 0; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; + if (unlikely((mfn >> machine_to_phys_order) != 0)) { + pfn = ~0; + goto try_override; + } pfn = 0; /* * The array access can fail (e.g., device space beyond end of RAM). * In such cases it doesn't matter what we return (we return garbage), * but we must handle the fault without crashing! */ - __get_user(pfn, &machine_to_phys_mapping[mfn]); - - /* - * If this appears to be a foreign mfn (because the pfn - * doesn't map back to the mfn), then check the local override - * table to see if there's a better pfn to use. + ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); +try_override: + /* ret might be < 0 if there are no entries in the m2p for mfn */ + if (ret < 0) + pfn = ~0; + else if (get_phys_to_machine(pfn) != mfn) + /* + * If this appears to be a foreign mfn (because the pfn + * doesn't map back to the mfn), then check the local override + * table to see if there's a better pfn to use. + * + * m2p_find_override_pfn returns ~0 if it doesn't find anything. + */ + pfn = m2p_find_override_pfn(mfn, ~0); + + /* + * pfn is ~0 if there are no entries in the m2p for mfn or if the + * entry doesn't map back to the mfn and m2p_override doesn't have a + * valid entry for it. 
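
The reworked mfn_to_pfn() above walks a fallback chain: the m2p table, a consistency check against the p2m, the m2p_override table, and finally (in the lines just below) the 1:1 identity range marked with IDENTITY_FRAME(). A toy user-space sketch of that chain with tiny stand-in tables follows; the arrays and sizes are invented, and only the order of the lookups mirrors the hunk:

#include <stdint.h>
#include <stdio.h>

#define INVALID_PFN        (~0UL)
#define IDENTITY_FRAME_BIT (1UL << (sizeof(unsigned long) * 8 - 2))
#define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)
#define NFRAMES            8

/* Toy stand-ins for the machine-to-physical and physical-to-machine tables. */
static unsigned long m2p[NFRAMES] = { [3] = 5 };                /* mfn 3 -> pfn 5 */
static unsigned long p2m[NFRAMES] = { [5] = 3,                  /* pfn 5 -> mfn 3 */
                                      [6] = IDENTITY_FRAME(6) };

static unsigned long find_override_pfn(unsigned long mfn)
{
        (void)mfn;
        return INVALID_PFN;     /* pretend the override table has no entry */
}

/* Same lookup order as the hunk above: m2p, consistency check against p2m,
 * override table, then the 1:1 (identity-mapped) range. */
static unsigned long toy_mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn = INVALID_PFN;

        if (mfn < NFRAMES)                      /* mfn within the m2p table */
                pfn = m2p[mfn];

        if (pfn == INVALID_PFN || p2m[pfn] != mfn)
                pfn = find_override_pfn(mfn);   /* looks foreign: try the override */

        if (pfn == INVALID_PFN && mfn < NFRAMES &&
            p2m[mfn] == IDENTITY_FRAME(mfn))
                pfn = mfn;                      /* identity-mapped region */

        return pfn;
}

int main(void)
{
        printf("mfn 3 -> pfn %#lx\n", toy_mfn_to_pfn(3));   /* 0x5            */
        printf("mfn 6 -> pfn %#lx\n", toy_mfn_to_pfn(6));   /* 0x6 (identity) */
        printf("mfn 7 -> pfn %#lx\n", toy_mfn_to_pfn(7));   /* ~0UL (unknown) */
        return 0;
}
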
*/ - if (get_phys_to_machine(pfn) != mfn) - pfn = m2p_find_override_pfn(mfn, pfn); + if (pfn == ~0 && + get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn)) + pfn = mfn; return pfn; } diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 2329b3eaf8d3..aa8620989162 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h @@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void) * its own functions. */ struct xen_pci_frontend_ops { - int (*enable_msi)(struct pci_dev *dev, int **vectors); + int (*enable_msi)(struct pci_dev *dev, int vectors[]); void (*disable_msi)(struct pci_dev *dev); - int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec); + int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec); void (*disable_msix)(struct pci_dev *dev); }; extern struct xen_pci_frontend_ops *xen_pci_frontend; static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, - int **vectors) + int vectors[]) { if (xen_pci_frontend && xen_pci_frontend->enable_msi) return xen_pci_frontend->enable_msi(dev, vectors); @@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) xen_pci_frontend->disable_msi(dev); } static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, - int **vectors, int nvec) + int vectors[], int nvec) { if (xen_pci_frontend && xen_pci_frontend->enable_msix) return xen_pci_frontend->enable_msix(dev, vectors, nvec); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 34244b2cd880..743642f1a36c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -47,7 +47,7 @@ obj-y += tsc.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o -obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o +obj-y += trampoline.o trampoline_$(BITS).o obj-y += process.o obj-y += i387.o xsave.o obj-y += ptrace.o @@ -59,6 +59,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += cpu/ obj-y += acpi/ obj-y += reboot.o +obj-$(CONFIG_X86_32) += reboot_32.o obj-$(CONFIG_MCA) += mca_32.o obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_X86_CPUID) += cpuid.o @@ -66,10 +67,9 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o +obj-$(CONFIG_SMP) += smpboot.o +obj-$(CONFIG_SMP) += tsc_sync.o obj-$(CONFIG_SMP) += setup_percpu.o -obj-$(CONFIG_X86_64_SMP) += tsc_sync.o -obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-y += apic/ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o @@ -109,6 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o +obj-$(CONFIG_OF) += devicetree.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3e6e2d68f761..9a966c579af5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -595,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) nid = acpi_get_node(handle); if (nid == -1 || !node_online(nid)) return; -#ifdef CONFIG_X86_64 - apicid_to_node[physid] = nid; + set_apicid_to_node(physid, nid); numa_set_node(cpu, nid); -#else /* CONFIG_X86_32 */ - apicid_2_node[physid] = nid; - cpu_to_node_map[cpu] = nid; -#endif - #endif } diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S index 28595d6df47c..ead21b663117 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.S +++ 
b/arch/x86/kernel/acpi/realmode/wakeup.S @@ -6,11 +6,17 @@ #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/processor-flags.h> +#include "wakeup.h" .code16 - .section ".header", "a" + .section ".jump", "ax" + .globl _start +_start: + cli + jmp wakeup_code /* This should match the structure in wakeup.h */ + .section ".header", "a" .globl wakeup_header wakeup_header: video_mode: .short 0 /* Video mode number */ @@ -30,14 +36,11 @@ wakeup_jmp: .byte 0xea /* ljmpw */ wakeup_jmp_off: .word 3f wakeup_jmp_seg: .word 0 wakeup_gdt: .quad 0, 0, 0 -signature: .long 0x51ee1111 +signature: .long WAKEUP_HEADER_SIGNATURE .text - .globl _start .code16 wakeup_code: -_start: - cli cld /* Apparently some dimwit BIOS programmers don't know how to @@ -77,12 +80,12 @@ _start: /* Check header signature... */ movl signature, %eax - cmpl $0x51ee1111, %eax + cmpl $WAKEUP_HEADER_SIGNATURE, %eax jne bogus_real_magic /* Check we really have everything... */ movl end_signature, %eax - cmpl $0x65a22c82, %eax + cmpl $WAKEUP_END_SIGNATURE, %eax jne bogus_real_magic /* Call the C code */ @@ -147,3 +150,7 @@ wakeup_heap: wakeup_stack: .space 2048 wakeup_stack_end: + + .section ".signature","a" +end_signature: + .long WAKEUP_END_SIGNATURE diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h index 69d38d0b2b64..e1828c07e79c 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.h +++ b/arch/x86/kernel/acpi/realmode/wakeup.h @@ -35,7 +35,8 @@ struct wakeup_header { extern struct wakeup_header wakeup_header; #endif -#define HEADER_OFFSET 0x3f00 -#define WAKEUP_SIZE 0x4000 +#define WAKEUP_HEADER_OFFSET 8 +#define WAKEUP_HEADER_SIGNATURE 0x51ee1111 +#define WAKEUP_END_SIGNATURE 0x65a22c82 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S index 060fff8f5c5b..d4f8010a5b1b 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S @@ -13,9 +13,19 @@ ENTRY(_start) SECTIONS { . = 0; + .jump : { + *(.jump) + } = 0x90909090 + + . = WAKEUP_HEADER_OFFSET; + .header : { + *(.header) + } + + . = ALIGN(16); .text : { *(.text*) - } + } = 0x90909090 . = ALIGN(16); .rodata : { @@ -33,11 +43,6 @@ SECTIONS *(.data*) } - .signature : { - end_signature = .; - LONG(0x65a22c82) - } - . = ALIGN(16); .bss : { __bss_start = .; @@ -45,20 +50,13 @@ SECTIONS __bss_end = .; } - . = HEADER_OFFSET; - .header : { - *(.header) + .signature : { + *(.signature) } - . = ALIGN(16); _end = .; /DISCARD/ : { *(.note*) } - - /* - * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: - */ - . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); } diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 68d1537b8c81..4572c58e66d5 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -18,12 +18,8 @@ #include "realmode/wakeup.h" #include "sleep.h" -unsigned long acpi_wakeup_address; unsigned long acpi_realmode_flags; -/* address in low memory of the wakeup routine. */ -static unsigned long acpi_realmode; - #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) static char temp_stack[4096]; #endif @@ -33,22 +29,17 @@ static char temp_stack[4096]; * * Create an identity mapped page table and copy the wakeup routine to * low memory. - * - * Note that this is too late to change acpi_wakeup_address. */ int acpi_save_state_mem(void) { struct wakeup_header *header; + /* address in low memory of the wakeup routine. 
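
Just below, acpi_save_state_mem() stops copying the wakeup image itself and instead locates it inside the already-relocated trampoline area with TRAMPOLINE_SYM() (defined earlier in this patch), which simply re-applies the symbol's offset from the start of the blob to the low-memory copy. A small sketch of that offset arithmetic with made-up buffers; RELOCATED_SYM below is an illustrative stand-in, not the kernel macro:

#include <stdio.h>
#include <string.h>

/* Pretend this is the trampoline blob as placed in the kernel image. */
static const unsigned char blob_start[] = "ABCDEFGHIJKLMNOP";
static const unsigned char *wakeup_sym  = &blob_start[6];   /* a symbol inside it */

/* Offset-preserving translation, the same shape as TRAMPOLINE_SYM(). */
#define RELOCATED_SYM(base, x) \
        ((void *)((base) + ((const unsigned char *)(x) - blob_start)))

int main(void)
{
        unsigned char lowmem_copy[sizeof(blob_start)];      /* the relocated copy */

        memcpy(lowmem_copy, blob_start, sizeof(blob_start));

        unsigned char *relocated = RELOCATED_SYM(lowmem_copy, wakeup_sym);

        printf("offset %td, byte '%c' (same as '%c' in the original)\n",
               relocated - lowmem_copy, *relocated, *wakeup_sym);
        return 0;
}
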
*/ + char *acpi_realmode; - if (!acpi_realmode) { - printk(KERN_ERR "Could not allocate memory during boot, " - "S3 disabled\n"); - return -ENOMEM; - } - memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE); + acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code); - header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET); - if (header->signature != 0x51ee1111) { + header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET); + if (header->signature != WAKEUP_HEADER_SIGNATURE) { printk(KERN_ERR "wakeup header does not match\n"); return -EINVAL; } @@ -68,9 +59,7 @@ int acpi_save_state_mem(void) /* GDT[0]: GDT self-pointer */ header->wakeup_gdt[0] = (u64)(sizeof(header->wakeup_gdt) - 1) + - ((u64)(acpi_wakeup_address + - ((char *)&header->wakeup_gdt - (char *)acpi_realmode)) - << 16); + ((u64)__pa(&header->wakeup_gdt) << 16); /* GDT[1]: big real mode-like code segment */ header->wakeup_gdt[1] = GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); @@ -96,7 +85,7 @@ int acpi_save_state_mem(void) header->pmode_cr3 = (u32)__pa(&initial_page_table); saved_magic = 0x12345678; #else /* CONFIG_64BIT */ - header->trampoline_segment = setup_trampoline() >> 4; + header->trampoline_segment = trampoline_address() >> 4; #ifdef CONFIG_SMP stack_start = (unsigned long)temp_stack + sizeof(temp_stack); early_gdt_descr.address = @@ -117,46 +106,6 @@ void acpi_restore_state_mem(void) { } - -/** - * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation - * - * We allocate a page from the first 1MB of memory for the wakeup - * routine for when we come back from a sleep state. The - * runtime allocator allows specification of <16MB pages, but not - * <1MB pages. - */ -void __init acpi_reserve_wakeup_memory(void) -{ - phys_addr_t mem; - - if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { - printk(KERN_ERR - "ACPI: Wakeup code way too big, S3 disabled.\n"); - return; - } - - mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); - - if (mem == MEMBLOCK_ERROR) { - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); - return; - } - acpi_realmode = (unsigned long) phys_to_virt(mem); - acpi_wakeup_address = mem; - memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); -} - -int __init acpi_configure_wakeup_memory(void) -{ - if (acpi_realmode) - set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); - - return 0; -} -arch_initcall(acpi_configure_wakeup_memory); - - static int __init acpi_sleep_setup(char *str) { while ((str != NULL) && (*str != '\0')) { diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index adbcbaa6f1df..86ba1c87165b 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h @@ -4,13 +4,10 @@ #include <asm/trampoline.h> -extern char wakeup_code_start, wakeup_code_end; - extern unsigned long saved_video_mode; extern long saved_magic; extern int wakeup_pmode_return; -extern char swsusp_pg_dir[PAGE_SIZE]; extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S index 6ff3b5730575..63b8ab524f2c 100644 --- a/arch/x86/kernel/acpi/wakeup_rm.S +++ b/arch/x86/kernel/acpi/wakeup_rm.S @@ -2,9 +2,11 @@ * Wrapper script for the realmode binary as a transport object * before copying to low memory. 
*/ - .section ".rodata","a" - .globl wakeup_code_start, wakeup_code_end -wakeup_code_start: +#include <asm/page_types.h> + + .section ".x86_trampoline","a" + .balign PAGE_SIZE + .globl acpi_wakeup_code +acpi_wakeup_code: .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" -wakeup_code_end: - .size wakeup_code_start, .-wakeup_code_start + .size acpi_wakeup_code, .-acpi_wakeup_code diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 7038b95d363f..4db35544de73 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) flush_icache_range((unsigned long)p->addr, (unsigned long)p->addr + p->len); } - + /* + * Intel Archiecture Software Developer's Manual section 7.1.3 specifies + * that a core serializing instruction such as "cpuid" should be + * executed on _each_ core before the new instruction is made visible. + */ + sync_core(); return 0; } diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 0a99f7198bc3..65634190ffd6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -12,7 +12,7 @@ static u32 *flush_words; -struct pci_device_id amd_nb_misc_ids[] = { +const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, @@ -20,6 +20,11 @@ struct pci_device_id amd_nb_misc_ids[] = { }; EXPORT_SYMBOL(amd_nb_misc_ids); +static struct pci_device_id amd_nb_link_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) }, + {} +}; + const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { { 0x00, 0x18, 0x20 }, { 0xff, 0x00, 0x20 }, @@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges; EXPORT_SYMBOL(amd_northbridges); static struct pci_dev *next_northbridge(struct pci_dev *dev, - struct pci_device_id *ids) + const struct pci_device_id *ids) { do { dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); @@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev, int amd_cache_northbridges(void) { - int i = 0; + u16 i = 0; struct amd_northbridge *nb; - struct pci_dev *misc; + struct pci_dev *misc, *link; if (amd_nb_num()) return 0; @@ -64,10 +69,12 @@ int amd_cache_northbridges(void) amd_northbridges.nb = nb; amd_northbridges.num = i; - misc = NULL; + link = misc = NULL; for (i = 0; i != amd_nb_num(); i++) { node_to_amd_nb(i)->misc = misc = next_northbridge(misc, amd_nb_misc_ids); + node_to_amd_nb(i)->link = link = + next_northbridge(link, amd_nb_link_ids); } /* some CPU families (e.g. 
family 0x11) do not support GART */ @@ -85,26 +92,95 @@ int amd_cache_northbridges(void) boot_cpu_data.x86_mask >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; + if (boot_cpu_data.x86 == 0x15) + amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; + + /* L3 cache partitioning is supported on family 0x15 */ + if (boot_cpu_data.x86 == 0x15) + amd_northbridges.flags |= AMD_NB_L3_PARTITIONING; + return 0; } EXPORT_SYMBOL_GPL(amd_cache_northbridges); -/* Ignores subdevice/subvendor but as far as I can figure out - they're useless anyways */ -int __init early_is_amd_nb(u32 device) +/* + * Ignores subdevice/subvendor but as far as I can figure out + * they're useless anyways + */ +bool __init early_is_amd_nb(u32 device) { - struct pci_device_id *id; + const struct pci_device_id *id; u32 vendor = device & 0xffff; + device >>= 16; for (id = amd_nb_misc_ids; id->vendor; id++) if (vendor == id->vendor && device == id->device) - return 1; + return true; + return false; +} + +int amd_get_subcaches(int cpu) +{ + struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; + unsigned int mask; + int cuid = 0; + + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return 0; + + pci_read_config_dword(link, 0x1d4, &mask); + +#ifdef CONFIG_SMP + cuid = cpu_data(cpu).compute_unit_id; +#endif + return (mask >> (4 * cuid)) & 0xf; +} + +int amd_set_subcaches(int cpu, int mask) +{ + static unsigned int reset, ban; + struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); + unsigned int reg; + int cuid = 0; + + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) + return -EINVAL; + + /* if necessary, collect reset state of L3 partitioning and BAN mode */ + if (reset == 0) { + pci_read_config_dword(nb->link, 0x1d4, &reset); + pci_read_config_dword(nb->misc, 0x1b8, &ban); + ban &= 0x180000; + } + + /* deactivate BAN mode if any subcaches are to be disabled */ + if (mask != 0xf) { + pci_read_config_dword(nb->misc, 0x1b8, &reg); + pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); + } + +#ifdef CONFIG_SMP + cuid = cpu_data(cpu).compute_unit_id; +#endif + mask <<= 4 * cuid; + mask |= (0xf ^ (1 << cuid)) << 26; + + pci_write_config_dword(nb->link, 0x1d4, mask); + + /* reset BAN mode if L3 partitioning returned to reset state */ + pci_read_config_dword(nb->link, 0x1d4, &reg); + if (reg == reset) { + pci_read_config_dword(nb->misc, 0x1b8, &reg); + reg &= ~0x180000; + pci_write_config_dword(nb->misc, 0x1b8, reg | ban); + } + return 0; } -int amd_cache_gart(void) +static int amd_cache_gart(void) { - int i; + u16 i; if (!amd_nb_has_feature(AMD_NB_GART)) return 0; diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51d4e1663066..1293c709ee85 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta, return 0; } -/* - * APB timer clock is not in sync with pclk on Langwell, which translates to - * unreliable read value caused by sampling error. the error does not add up - * overtime and only happens when sampling a 0 as a 1 by mistake. so the time - * would go backwards. the following code is trying to prevent time traveling - * backwards. little bit paranoid. 
- */ static cycle_t apbt_read_clocksource(struct clocksource *cs) { - unsigned long t0, t1, t2; - static unsigned long last_read; - -bad_count: - t1 = apbt_readl(phy_cs_timer_id, - APBTMR_N_CURRENT_VALUE); - t2 = apbt_readl(phy_cs_timer_id, - APBTMR_N_CURRENT_VALUE); - if (unlikely(t1 < t2)) { - pr_debug("APBT: read current count error %lx:%lx:%lx\n", - t1, t2, t2 - t1); - goto bad_count; - } - /* - * check against cached last read, makes sure time does not go back. - * it could be a normal rollover but we will do tripple check anyway - */ - if (unlikely(t2 > last_read)) { - /* check if we have a normal rollover */ - unsigned long raw_intr_status = - apbt_readl_reg(APBTMRS_RAW_INT_STATUS); - /* - * cs timer interrupt is masked but raw intr bit is set if - * rollover occurs. then we read EOI reg to clear it. - */ - if (raw_intr_status & (1 << phy_cs_timer_id)) { - apbt_readl(phy_cs_timer_id, APBTMR_N_EOI); - goto out; - } - pr_debug("APB CS going back %lx:%lx:%lx ", - t2, last_read, t2 - last_read); -bad_count_x3: - pr_debug("triple check enforced\n"); - t0 = apbt_readl(phy_cs_timer_id, - APBTMR_N_CURRENT_VALUE); - udelay(1); - t1 = apbt_readl(phy_cs_timer_id, - APBTMR_N_CURRENT_VALUE); - udelay(1); - t2 = apbt_readl(phy_cs_timer_id, - APBTMR_N_CURRENT_VALUE); - if ((t2 > t1) || (t1 > t0)) { - printk(KERN_ERR "Error: APB CS tripple check failed\n"); - goto bad_count_x3; - } - } -out: - last_read = t2; - return (cycle_t)~t2; + unsigned long current_count; + + current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE); + return (cycle_t)~current_count; } static int apbt_clocksource_register(void) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 5955a7800a96..7b1e8e10b89c 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -13,7 +13,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> -#include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/pci_ids.h> #include <linux/pci.h> @@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) static u32 __init allocate_aperture(void) { u32 aper_size; - void *p; + unsigned long addr; /* aper_size should <= 1G */ if (fallback_aper_order > 5) @@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void) * so don't use 512M below as gart iommu, leave the space for kernel * code for safe */ - p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); + addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); + if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { + printk(KERN_ERR + "Cannot allocate aperture memory hole (%lx,%uK)\n", + addr, aper_size>>10); + return 0; + } + memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); /* * Kmemleak should not scan this block as it may not be mapped via the * kernel direct mapping. 
*/ - kmemleak_ignore(p); - if (!p || __pa(p)+aper_size > 0xffffffff) { - printk(KERN_ERR - "Cannot allocate aperture memory hole (%p,%uK)\n", - p, aper_size>>10); - if (p) - free_bootmem(__pa(p), aper_size); - return 0; - } + kmemleak_ignore(phys_to_virt(addr)); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", - aper_size >> 10, __pa(p)); - insert_aperture_resource((u32)__pa(p), aper_size); - register_nosave_region((u32)__pa(p) >> PAGE_SHIFT, - (u32)__pa(p+aper_size) >> PAGE_SHIFT); + aper_size >> 10, addr); + insert_aperture_resource((u32)addr, aper_size); + register_nosave_region(addr >> PAGE_SHIFT, + (addr+aper_size) >> PAGE_SHIFT); - return (u32)__pa(p); + return (u32)addr; } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 76b96d74978a..966673f44141 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -43,6 +43,7 @@ #include <asm/i8259.h> #include <asm/proto.h> #include <asm/apic.h> +#include <asm/io_apic.h> #include <asm/desc.h> #include <asm/hpet.h> #include <asm/idle.h> @@ -78,12 +79,21 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); #ifdef CONFIG_X86_32 + +/* + * On x86_32, the mapping between cpu and logical apicid may vary + * depending on apic in use. The following early percpu variable is + * used for the mapping. This is where the behaviors of x86_64 and 32 + * actually diverge. Let's keep it ugly for now. + */ +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID); + /* * Knob to control our willingness to enable the local APIC. * * +1=force-enable */ -static int force_enable_local_apic; +static int force_enable_local_apic __initdata; /* * APIC command line parameters */ @@ -153,7 +163,7 @@ early_param("nox2apic", setup_nox2apic); unsigned long mp_lapic_addr; int disable_apic; /* Disable local APIC timer from the kernel commandline or via dmi quirk */ -static int disable_apic_timer __cpuinitdata; +static int disable_apic_timer __initdata; /* Local APIC timer works in C2 */ int local_apic_timer_c2_ok; EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); @@ -177,29 +187,8 @@ static struct resource lapic_resource = { static unsigned int calibration_result; -static int lapic_next_event(unsigned long delta, - struct clock_event_device *evt); -static void lapic_timer_setup(enum clock_event_mode mode, - struct clock_event_device *evt); -static void lapic_timer_broadcast(const struct cpumask *mask); static void apic_pm_activate(void); -/* - * The local apic timer can be used for any function which is CPU local. 
- */ -static struct clock_event_device lapic_clockevent = { - .name = "lapic", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT - | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, - .shift = 32, - .set_mode = lapic_timer_setup, - .set_next_event = lapic_next_event, - .broadcast = lapic_timer_broadcast, - .rating = 100, - .irq = -1, -}; -static DEFINE_PER_CPU(struct clock_event_device, lapic_events); - static unsigned long apic_phys; /* @@ -238,7 +227,7 @@ static int modern_apic(void) * right after this call apic become NOOP driven * so apic->write/read doesn't do anything */ -void apic_disable(void) +static void __init apic_disable(void) { pr_info("APIC: switched to apic NOOP\n"); apic = &apic_noop; @@ -282,23 +271,6 @@ u64 native_apic_icr_read(void) return icr1 | ((u64)icr2 << 32); } -/** - * enable_NMI_through_LVT0 - enable NMI through local vector table 0 - */ -void __cpuinit enable_NMI_through_LVT0(void) -{ - unsigned int v; - - /* unmask and set to NMI */ - v = APIC_DM_NMI; - - /* Level triggered for 82489DX (32bit mode) */ - if (!lapic_is_integrated()) - v |= APIC_LVT_LEVEL_TRIGGER; - - apic_write(APIC_LVT0, v); -} - #ifdef CONFIG_X86_32 /** * get_physical_broadcast - Get number of physical broadcast IDs @@ -508,6 +480,23 @@ static void lapic_timer_broadcast(const struct cpumask *mask) #endif } + +/* + * The local apic timer can be used for any function which is CPU local. + */ +static struct clock_event_device lapic_clockevent = { + .name = "lapic", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT + | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, + .shift = 32, + .set_mode = lapic_timer_setup, + .set_next_event = lapic_next_event, + .broadcast = lapic_timer_broadcast, + .rating = 100, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + /* * Setup the local APIC timer for this CPU. Copy the initialized values * of the boot CPU and register the clock event in the framework. @@ -1209,7 +1198,7 @@ void __cpuinit setup_local_APIC(void) rdtscll(tsc); if (disable_apic) { - arch_disable_smp_support(); + disable_ioapic_support(); return; } @@ -1237,6 +1226,19 @@ void __cpuinit setup_local_APIC(void) */ apic->init_apic_ldr(); +#ifdef CONFIG_X86_32 + /* + * APIC LDR is initialized. If logical_apicid mapping was + * initialized during get_smp_config(), make sure it matches the + * actual value. + */ + i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); + /* always use the value from LDR */ + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = + logical_smp_processor_id(); +#endif + /* * Set Task Priority to 'accept all'. We never change this * later on. 
@@ -1448,7 +1450,7 @@ int __init enable_IR(void) void __init enable_IR_x2apic(void) { unsigned long flags; - struct IO_APIC_route_entry **ioapic_entries = NULL; + struct IO_APIC_route_entry **ioapic_entries; int ret, x2apic_enabled = 0; int dmar_table_init_ret; @@ -1537,7 +1539,7 @@ static int __init detect_init_APIC(void) } #else -static int apic_verify(void) +static int __init apic_verify(void) { u32 features, h, l; @@ -1562,7 +1564,7 @@ static int apic_verify(void) return 0; } -int apic_force_enable(void) +int __init apic_force_enable(unsigned long addr) { u32 h, l; @@ -1578,7 +1580,7 @@ int apic_force_enable(void) if (!(l & MSR_IA32_APICBASE_ENABLE)) { pr_info("Local APIC disabled by BIOS -- reenabling.\n"); l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; + l |= MSR_IA32_APICBASE_ENABLE | addr; wrmsr(MSR_IA32_APICBASE, l, h); enabled_via_apicbase = 1; } @@ -1619,7 +1621,7 @@ static int __init detect_init_APIC(void) "you can enable it with \"lapic\"\n"); return -1; } - if (apic_force_enable()) + if (apic_force_enable(APIC_DEFAULT_PHYS_BASE)) return -1; } else { if (apic_verify()) @@ -1930,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version) { int cpu; - /* - * Validate version - */ - if (version == 0x0) { - pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - version); - version = 0x10; - } - apic_version[apicid] = version; - if (num_processors >= nr_cpu_ids) { int max = nr_cpu_ids; int thiscpu = max + disabled_cpus; @@ -1954,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version) } num_processors++; - cpu = cpumask_next_zero(-1, cpu_present_mask); - - if (version != apic_version[boot_cpu_physical_apicid]) - WARN_ONCE(1, - "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", - apic_version[boot_cpu_physical_apicid], cpu, version); - - physid_set(apicid, phys_cpu_present_map); if (apicid == boot_cpu_physical_apicid) { /* * x86_bios_cpu_apicid is required to have processors listed * in same order as logical cpu numbers. Hence the first * entry is BSP, and so on. + * boot_cpu_init() already hold bit 0 in cpu_present_mask + * for BSP. 
*/ cpu = 0; + } else + cpu = cpumask_next_zero(-1, cpu_present_mask); + + /* + * Validate version + */ + if (version == 0x0) { + pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n", + cpu, apicid); + version = 0x10; } + apic_version[apicid] = version; + + if (version != apic_version[boot_cpu_physical_apicid]) { + pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n", + apic_version[boot_cpu_physical_apicid], cpu, version); + } + + physid_set(apicid, phys_cpu_present_map); if (apicid > max_physical_apicid) max_physical_apicid = apicid; @@ -1977,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; #endif - +#ifdef CONFIG_X86_32 + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = + apic->x86_32_early_logical_apicid(cpu); +#endif set_cpu_possible(cpu, true); set_cpu_present(cpu, true); } @@ -1998,10 +2004,14 @@ void default_init_apic_ldr(void) } #ifdef CONFIG_X86_32 -int default_apicid_to_node(int logical_apicid) +int default_x86_32_numa_cpu_node(int cpu) { -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; +#ifdef CONFIG_NUMA + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); + + if (apicid != BAD_APICID) + return __apicid_to_node[apicid]; + return NUMA_NO_NODE; #else return 0; #endif diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 09d3b17ce0c2..5652d31fe108 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -185,8 +185,6 @@ struct apic apic_flat = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, @@ -337,8 +335,6 @@ struct apic apic_physflat = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e31b9ffe25f5..f1baa2dc087a 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void) return 0; } -static int noop_cpu_to_logical_apicid(int cpu) -{ - return 0; -} - static int noop_phys_pkg_id(int cpuid_apic, int index_msb) { return 0; @@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) cpumask_set_cpu(cpu, retmask); } -int noop_apicid_to_node(int logical_apicid) -{ - /* we're always on node 0 */ - return 0; -} - static u32 noop_apic_read(u32 reg) { WARN_ON_ONCE((cpu_has_apic && !disable_apic)); @@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v) WARN_ON_ONCE(cpu_has_apic && !disable_apic); } +#ifdef CONFIG_X86_32 +static int noop_x86_32_numa_cpu_node(int cpu) +{ + /* we're always on node 0 */ + return 0; +} +#endif + struct apic apic_noop = { .name = "noop", .probe = noop_probe, @@ -153,9 +150,7 @@ struct apic apic_noop = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = noop_apicid_to_node, - .cpu_to_logical_apicid = noop_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, 
.apicid_to_cpu_present = physid_set_mask_of_physid, @@ -197,4 +192,9 @@ struct apic apic_noop = { .icr_write = noop_apic_icr_write, .wait_icr_idle = noop_apic_wait_icr_idle, .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, + +#ifdef CONFIG_X86_32 + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, +#endif }; diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index cb804c5091b9..541a2e431659 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit) return 1; } +static int bigsmp_early_logical_apicid(int cpu) +{ + /* on bigsmp, logical apicid is the same as physical */ + return early_per_cpu(x86_cpu_to_apicid, cpu); +} + static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; @@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void) nr_ioapics); } -static int bigsmp_apicid_to_node(int logical_apicid) -{ - return apicid_2_node[hard_smp_processor_id()]; -} - static int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) @@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -/* Mapping from cpu number to logical apicid */ -static inline int bigsmp_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_physical_id(cpu); -} - static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ @@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid) /* As we are using single CPU as destination, pick only one CPU here */ static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) { - return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask)); + int cpu = cpumask_first(cpumask); + + if (cpu < nr_cpu_ids) + return cpu_physical_id(cpu); + return BAD_APICID; } static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, @@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, */ for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; + return cpu_physical_id(cpu); } - return bigsmp_cpu_to_logical_apicid(cpu); + return BAD_APICID; } static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) @@ -219,8 +216,6 @@ struct apic apic_bigsmp = { .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = bigsmp_apicid_to_node, - .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, @@ -256,4 +251,7 @@ struct apic apic_bigsmp = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8593582d8022..3e9de4854c5b 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } +static int es7000_early_logical_apicid(int 
cpu) +{ + /* on es7000, logical apicid is the same as physical */ + return early_per_cpu(x86_bios_cpu_apicid, cpu); +} + static unsigned long calculate_ldr(int cpu) { unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); @@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void) nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); } -static int es7000_apicid_to_node(int logical_apicid) +static int es7000_numa_cpu_node(int cpu) { return 0; } - static int es7000_cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) @@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) ++cpu_id; } -/* Mapping from cpu number to logical apicid */ -static int es7000_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ @@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) * The cpus in the mask must all be on the apic cluster. */ for_each_cpu(cpu, cpumask) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); + int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { WARN(1, "Not a valid mask!"); @@ -578,7 +571,7 @@ static unsigned int es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = es7000_cpu_to_logical_apicid(0); + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) @@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = es7000_early_logical_apicid, + .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; struct apic __refdata apic_es7000 = { @@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = es7000_early_logical_apicid, + .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 79fd43ca6f96..c4e557a1ebb6 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, 
arch_spin_lock(&lock); printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); show_regs(regs); - dump_stack(); arch_spin_unlock(&lock); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return NOTIFY_STOP; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ca9e2a3545a9..4b5ebd26f565 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int skip_ioapic_setup; -void arch_disable_smp_support(void) +/** + * disable_ioapic_support() - disables ioapic support at runtime + */ +void disable_ioapic_support(void) { #ifdef CONFIG_PCI noioapicquirk = 1; @@ -120,11 +123,14 @@ void arch_disable_smp_support(void) static int __init parse_noapic(char *str) { /* disable IO-APIC */ - arch_disable_smp_support(); + disable_ioapic_support(); return 0; } early_param("noapic", parse_noapic); +static int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr); + /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) { @@ -181,7 +187,7 @@ int __init arch_early_irq_init(void) irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); for (i = 0; i < count; i++) { - set_irq_chip_data(i, &cfg[i]); + irq_set_chip_data(i, &cfg[i]); zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); /* @@ -200,7 +206,7 @@ int __init arch_early_irq_init(void) #ifdef CONFIG_SPARSE_IRQ static struct irq_cfg *irq_cfg(unsigned int irq) { - return get_irq_chip_data(irq); + return irq_get_chip_data(irq); } static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) @@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { if (!cfg) return; - set_irq_chip_data(at, NULL); + irq_set_chip_data(at, NULL); free_cpumask_var(cfg->domain); free_cpumask_var(cfg->old_domain); kfree(cfg); @@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) if (res < 0) { if (res != -EEXIST) return NULL; - cfg = get_irq_chip_data(at); + cfg = irq_get_chip_data(at); if (cfg) return cfg; } cfg = alloc_irq_cfg(at, node); if (cfg) - set_irq_chip_data(at, cfg); + irq_set_chip_data(at, cfg); else irq_free_desc(at); return cfg; @@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq) #define default_MCA_trigger(idx) (1) #define default_MCA_polarity(idx) default_ISA_polarity(idx) -static int MPBIOS_polarity(int idx) +static int irq_polarity(int idx) { int bus = mp_irqs[idx].srcbus; int polarity; @@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx) return polarity; } -static int MPBIOS_trigger(int idx) +static int irq_trigger(int idx) { int bus = mp_irqs[idx].srcbus; int trigger; @@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx) return trigger; } -static inline int irq_polarity(int idx) -{ - return MPBIOS_polarity(idx); -} - -static inline int irq_trigger(int idx) -{ - return MPBIOS_trigger(idx); -} - static int pin_2_irq(int idx, int apic, int pin) { int irq; @@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu) raw_spin_lock(&vector_lock); /* Mark the inuse vectors */ for_each_active_irq(irq) { - cfg = get_irq_chip_data(irq); + cfg = irq_get_chip_data(irq); if (!cfg) continue; /* @@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu) static struct irq_chip ioapic_chip; static struct irq_chip ir_ioapic_chip; -#define IOAPIC_AUTO -1 -#define IOAPIC_EDGE 0 -#define IOAPIC_LEVEL 1 - #ifdef CONFIG_X86_32 static inline int 
IO_APIC_irq_trigger(int irq) { @@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq) } #endif -static void ioapic_register_intr(unsigned int irq, unsigned long trigger) +static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, + unsigned long trigger) { + struct irq_chip *chip = &ioapic_chip; + irq_flow_handler_t hdl; + bool fasteoi; if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) + trigger == IOAPIC_LEVEL) { irq_set_status_flags(irq, IRQ_LEVEL); - else + fasteoi = true; + } else { irq_clear_status_flags(irq, IRQ_LEVEL); + fasteoi = false; + } - if (irq_remapped(get_irq_chip_data(irq))) { + if (irq_remapped(cfg)) { irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - if (trigger) - set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, - handle_fasteoi_irq, - "fasteoi"); - else - set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, - handle_edge_irq, "edge"); - return; + chip = &ir_ioapic_chip; + fasteoi = trigger != 0; } - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_fasteoi_irq, - "fasteoi"); - else - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_edge_irq, "edge"); + hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; + irq_set_chip_and_handler_name(irq, chip, hdl, + fasteoi ? "fasteoi" : "edge"); } static int setup_ioapic_entry(int apic_id, int irq, @@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, return; } - ioapic_register_intr(irq, trigger); + ioapic_register_intr(irq, cfg, trigger); if (irq < legacy_pic->nr_legacy_irqs) legacy_pic->mask(irq); @@ -1385,33 +1373,26 @@ static struct { DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); } mp_ioapic_routing[MAX_IO_APICS]; -static void __init setup_IO_APIC_irqs(void) +static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) { - int apic_id, pin, idx, irq, notcon = 0; - int node = cpu_to_node(0); - struct irq_cfg *cfg; + if (idx != -1) + return false; - apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", + mp_ioapics[apic_id].apicid, pin); + return true; +} + +static void __init __io_apic_setup_irqs(unsigned int apic_id) +{ + int idx, node = cpu_to_node(0); + struct io_apic_irq_attr attr; + unsigned int pin, irq; - for (apic_id = 0; apic_id < nr_ioapics; apic_id++) for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { idx = find_irq_entry(apic_id, pin, mp_INT); - if (idx == -1) { - if (!notcon) { - notcon = 1; - apic_printk(APIC_VERBOSE, - KERN_DEBUG " %d-%d", - mp_ioapics[apic_id].apicid, pin); - } else - apic_printk(APIC_VERBOSE, " %d-%d", - mp_ioapics[apic_id].apicid, pin); + if (io_apic_pin_not_connected(idx, apic_id, pin)) continue; - } - if (notcon) { - apic_printk(APIC_VERBOSE, - " (apicid-pin) not connected\n"); - notcon = 0; - } irq = pin_2_irq(idx, apic_id, pin); @@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void) * installed and if it returns 1: */ if (apic->multi_timer_check && - apic->multi_timer_check(apic_id, irq)) + apic->multi_timer_check(apic_id, irq)) continue; - cfg = alloc_irq_and_cfg_at(irq, node); - if (!cfg) - continue; + set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), + irq_polarity(idx)); - add_pin_to_irq_node(cfg, node, apic_id, pin); - /* - * don't mark it in pin_programmed, so later acpi could - * set it correctly when irq < 16 - */ - setup_ioapic_irq(apic_id, 
pin, irq, cfg, irq_trigger(idx), - irq_polarity(idx)); + io_apic_setup_irq_pin(irq, node, &attr); } +} - if (notcon) - apic_printk(APIC_VERBOSE, - " (apicid-pin) not connected\n"); +static void __init setup_IO_APIC_irqs(void) +{ + unsigned int apic_id; + + apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) + __io_apic_setup_irqs(apic_id); } /* @@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void) void setup_IO_APIC_irq_extra(u32 gsi) { int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); - struct irq_cfg *cfg; + struct io_apic_irq_attr attr; /* * Convert 'gsi' to 'ioapic.pin'. @@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi) if (apic_id == 0 || irq < NR_IRQS_LEGACY) return; - cfg = alloc_irq_and_cfg_at(irq, node); - if (!cfg) - return; - - add_pin_to_irq_node(cfg, node, apic_id, pin); - - if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { - pr_debug("Pin %d-%d already programmed\n", - mp_ioapics[apic_id].apicid, pin); - return; - } - set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); + set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), + irq_polarity(idx)); - setup_ioapic_irq(apic_id, pin, irq, cfg, - irq_trigger(idx), irq_polarity(idx)); + io_apic_setup_irq_pin_once(irq, node, &attr); } /* @@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, * The timer IRQ doesn't have to know that behind the * scene we may have a 8259A-master in AEOI mode ... */ - set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); + irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, + "edge"); /* * Add it to the IO-APIC irq-routing table: @@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void) for_each_active_irq(irq) { struct irq_pin_list *entry; - cfg = get_irq_chip_data(irq); + cfg = irq_get_chip_data(irq); if (!cfg) continue; entry = cfg->irq_2_pin; @@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg) void irq_force_complete_move(int irq) { - struct irq_cfg *cfg = get_irq_chip_data(irq); + struct irq_cfg *cfg = irq_get_chip_data(irq); if (!cfg) return; @@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { } static void ack_apic_edge(struct irq_data *data) { irq_complete_move(data->chip_data); - move_native_irq(data->irq); + irq_move_irq(data); ack_APIC_irq(); } @@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data) irq_complete_move(cfg); #ifdef CONFIG_GENERIC_PENDING_IRQ /* If we are moving the irq we need to mask it */ - if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { + if (unlikely(irqd_is_setaffinity_pending(data))) { do_unmask_irq = 1; mask_ioapic(cfg); } @@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data) * and you can go talk to the chipset vendor about it. */ if (!io_apic_level_ack_pending(cfg)) - move_masked_irq(irq); + irq_move_masked_irq(data); unmask_ioapic(cfg); } } @@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void) * 0x80, because int 0x80 is hm, kind of importantish. ;) */ for_each_active_irq(irq) { - cfg = get_irq_chip_data(irq); + cfg = irq_get_chip_data(irq); if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* * Hmm.. We don't have an entry for this, @@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void) legacy_pic->make_irq(irq); else /* Strange. Oh, well.. 
*/ - set_irq_chip(irq, &no_irq_chip); + irq_set_chip(irq, &no_irq_chip); } } } @@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = { static void lapic_register_intr(int irq) { irq_clear_status_flags(irq, IRQ_LEVEL); - set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, + irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge"); } @@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata; */ static inline void __init check_timer(void) { - struct irq_cfg *cfg = get_irq_chip_data(0); + struct irq_cfg *cfg = irq_get_chip_data(0); int node = cpu_to_node(0); int apic1, pin1, apic2, pin2; unsigned long flags; @@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node) raw_spin_unlock_irqrestore(&vector_lock, flags); if (ret) { - set_irq_chip_data(irq, cfg); + irq_set_chip_data(irq, cfg); irq_clear_status_flags(irq, IRQ_NOREQUEST); } else { free_irq_at(irq, cfg); @@ -3085,7 +3055,7 @@ int create_irq(void) void destroy_irq(unsigned int irq) { - struct irq_cfg *cfg = get_irq_chip_data(irq); + struct irq_cfg *cfg = irq_get_chip_data(irq); unsigned long flags; irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); @@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); - if (irq_remapped(get_irq_chip_data(irq))) { + if (irq_remapped(cfg)) { struct irte irte; int ir_index; u16 sub_handle; @@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) { + struct irq_chip *chip = &msi_chip; struct msi_msg msg; int ret; @@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) if (ret < 0) return ret; - set_irq_msi(irq, msidesc); + irq_set_msi_desc(irq, msidesc); write_msi_msg(irq, &msg); - if (irq_remapped(get_irq_chip_data(irq))) { + if (irq_remapped(irq_get_chip_data(irq))) { irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); - } else - set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); + chip = &msi_ir_chip; + } + + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); @@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq) if (ret < 0) return ret; dmar_msi_write(irq, &msg); - set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, - "edge"); + irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, + "edge"); return 0; } #endif @@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = { int arch_setup_hpet_msi(unsigned int irq, unsigned int id) { + struct irq_chip *chip = &hpet_msi_type; struct msi_msg msg; int ret; @@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) if (ret < 0) return ret; - hpet_msi_write(get_irq_data(irq), &msg); + hpet_msi_write(irq_get_handler_data(irq), &msg); irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - if (irq_remapped(get_irq_chip_data(irq))) - set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, - handle_edge_irq, "edge"); - else - set_irq_chip_and_handler_name(irq, &hpet_msi_type, - handle_edge_irq, "edge"); + if (irq_remapped(irq_get_chip_data(irq))) + chip = &ir_hpet_msi_type; + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); return 0; } #endif @@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int 
irq, struct pci_dev *dev) write_ht_irq_msg(irq, &msg); - set_irq_chip_and_handler_name(irq, &ht_irq_chip, + irq_set_chip_and_handler_name(irq, &ht_irq_chip, handle_edge_irq, "edge"); dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); @@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) } #endif /* CONFIG_HT_IRQ */ -int __init io_apic_get_redir_entries (int ioapic) +int +io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) +{ + struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); + int ret; + + if (!cfg) + return -EINVAL; + ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); + if (!ret) + setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, + attr->trigger, attr->polarity); + return ret; +} + +static int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr) +{ + unsigned int id = attr->ioapic, pin = attr->ioapic_pin; + int ret; + + /* Avoid redundant programming */ + if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) { + pr_debug("Pin %d-%d already programmed\n", + mp_ioapics[id].apicid, pin); + return 0; + } + ret = io_apic_setup_irq_pin(irq, node, attr); + if (!ret) + set_bit(pin, mp_ioapic_routing[id].pin_programmed); + return ret; +} + +static int __init io_apic_get_redir_entries(int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; @@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void) } #endif -static int __io_apic_set_pci_routing(struct device *dev, int irq, - struct io_apic_irq_attr *irq_attr) +int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr) { - struct irq_cfg *cfg; int node; - int ioapic, pin; - int trigger, polarity; - ioapic = irq_attr->ioapic; if (!IO_APIC_IRQ(irq)) { apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", - ioapic); + irq_attr->ioapic); return -EINVAL; } - if (dev) - node = dev_to_node(dev); - else - node = cpu_to_node(0); - - cfg = alloc_irq_and_cfg_at(irq, node); - if (!cfg) - return 0; - - pin = irq_attr->ioapic_pin; - trigger = irq_attr->trigger; - polarity = irq_attr->polarity; + node = dev ? dev_to_node(dev) : cpu_to_node(0); - /* - * IRQs < 16 are already in the irq_2_pin[] map - */ - if (irq >= legacy_pic->nr_legacy_irqs) { - if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { - printk(KERN_INFO "can not add pin %d for irq %d\n", - pin, irq); - return 0; - } - } - - setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); - - return 0; + return io_apic_setup_irq_pin_once(irq, node, irq_attr); } -int io_apic_set_pci_routing(struct device *dev, int irq, - struct io_apic_irq_attr *irq_attr) -{ - int ioapic, pin; - /* - * Avoid pin reprogramming. PRTs typically include entries - * with redundant pin->gsi mappings (but unique PCI devices); - * we only program the IOAPIC on the first. 
- */ - ioapic = irq_attr->ioapic; - pin = irq_attr->ioapic_pin; - if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { - pr_debug("Pin %d-%d already programmed\n", - mp_ioapics[ioapic].apicid, pin); - return 0; - } - set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); - - return __io_apic_set_pci_routing(dev, irq, irq_attr); -} - -u8 __init io_apic_unique_id(u8 id) -{ #ifdef CONFIG_X86_32 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return io_apic_get_unique_id(nr_ioapics, id); - else - return id; -#else - int i; - DECLARE_BITMAP(used, 256); - - bitmap_zero(used, 256); - for (i = 0; i < nr_ioapics; i++) { - struct mpc_ioapic *ia = &mp_ioapics[i]; - __set_bit(ia->apicid, used); - } - if (!test_bit(id, used)) - return id; - return find_first_zero_bit(used, 256); -#endif -} - -#ifdef CONFIG_X86_32 -int __init io_apic_get_unique_id(int ioapic, int apic_id) +static int __init io_apic_get_unique_id(int ioapic, int apic_id) { union IO_APIC_reg_00 reg_00; static physid_mask_t apic_id_map = PHYSID_MASK_NONE; @@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) return apic_id; } + +static u8 __init io_apic_unique_id(u8 id) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else + return id; +} +#else +static u8 __init io_apic_unique_id(u8 id) +{ + int i; + DECLARE_BITMAP(used, 256); + + bitmap_zero(used, 256); + for (i = 0; i < nr_ioapics; i++) { + struct mpc_ioapic *ia = &mp_ioapics[i]; + __set_bit(ia->apicid, used); + } + if (!test_bit(id, used)) + return id; + return find_first_zero_bit(used, 256); +} #endif -int __init io_apic_get_version(int ioapic) +static int __init io_apic_get_version(int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; @@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) void __init setup_ioapic_dest(void) { int pin, ioapic, irq, irq_entry; - struct irq_desc *desc; const struct cpumask *mask; + struct irq_data *idata; if (skip_ioapic_setup == 1) return; @@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void) if ((ioapic > 0) && (irq > 16)) continue; - desc = irq_to_desc(irq); + idata = irq_get_irq_data(irq); /* * Honour affinities which have been set in early boot */ - if (desc->status & - (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->irq_data.affinity; + if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) + mask = idata->affinity; else mask = apic->target_cpus(); if (intr_remapping_enabled) - ir_ioapic_set_affinity(&desc->irq_data, mask, false); + ir_ioapic_set_affinity(idata, mask, false); else - ioapic_set_affinity(&desc->irq_data, mask, false); + ioapic_set_affinity(idata, mask, false); } } @@ -4026,7 +3980,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) return gsi - mp_gsi_routing[ioapic].gsi_base; } -static int bad_ioapic(unsigned long address) +static __init int bad_ioapic(unsigned long address) { if (nr_ioapics >= MAX_IO_APICS) { printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " @@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) /* Enable IOAPIC early just for system timer */ void __init pre_init_apic_IRQ0(void) { - struct irq_cfg *cfg; + struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; printk(KERN_INFO "Early APIC setup for system timer0\n"); #ifndef CONFIG_SMP physid_set_mask_of_physid(boot_cpu_physical_apicid, 
&phys_cpu_present_map); #endif - /* Make sure the irq descriptor is set up */ - cfg = alloc_irq_and_cfg_at(0, 0); - setup_local_APIC(); - add_pin_to_irq_node(cfg, 0, 0, 0); - set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); - - setup_ioapic_irq(0, 0, 0, cfg, 0, 0); + io_apic_setup_irq_pin(0, 0, &attr); + irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, + "edge"); } diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 08385e090a6f..cce91bf26676 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, local_irq_restore(flags); } +#ifdef CONFIG_X86_32 + void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector) { @@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, local_irq_save(flags); for_each_cpu(query_cpu, mask) __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); local_irq_restore(flags); } @@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, if (query_cpu == this_cpu) continue; __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); + early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); } local_irq_restore(flags); } -#ifdef CONFIG_X86_32 - /* * This is only used on smaller machines. */ diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 960f26ab5c9f..6273eee5134b 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask return physids_promote(0xFUL, retmap); } -static inline int numaq_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -} - /* * Supporting over 60 cpus on NUMA-Q requires a locality-dependent * cpu to APIC ID relation to properly interact with the intelligent @@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid) return logical_apicid >> 4; } +static int numaq_numa_cpu_node(int cpu) +{ + int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + + if (logical_apicid != BAD_APICID) + return numaq_apicid_to_node(logical_apicid); + return NUMA_NO_NODE; +} + static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) { int node = numaq_apicid_to_node(logical_apicid); @@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = { .ioapic_phys_id_map = numaq_ioapic_phys_id_map, .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = numaq_multi_timer_check, - .apicid_to_node = numaq_apicid_to_node, - .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = numaq_setup_portio_remap, @@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = numaq_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 
99d2fe016084..fc84c7b61108 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void) apic->setup_apic_routing(); } +static int default_x86_32_early_logical_apicid(int cpu) +{ + return 1 << cpu; +} + static void setup_apic_flat_routing(void) { #ifdef CONFIG_X86_IO_APIC @@ -130,8 +135,6 @@ struct apic apic_default = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = setup_apic_flat_routing, .multi_timer_check = NULL, - .apicid_to_node = default_apicid_to_node, - .cpu_to_logical_apicid = default_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, @@ -167,6 +170,9 @@ struct apic apic_default = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; extern struct apic apic_numaq; diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 9b419263d90d..e4b8059b414a 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit) return 1; } -static void summit_init_apic_ldr(void) +static int summit_early_logical_apicid(int cpu) { - unsigned long val, id; int count = 0; - u8 my_id = (u8)hard_smp_processor_id(); + u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; @@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void) /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = nr_cpu_ids; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; + lid = early_per_cpu(x86_cpu_to_logical_apicid, i); if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } @@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void) /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); + return my_cluster | (1UL << count); +} + +static void summit_init_apic_ldr(void) +{ + int cpu = smp_processor_id(); + unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + unsigned long val; + apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); @@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void) nr_ioapics); } -static int summit_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - -/* Mapping from cpu number to logical apicid */ -static inline int summit_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - static int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) @@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) * The cpus in the mask must all be on the apic cluster. 
*/ for_each_cpu(cpu, cpumask) { - int new_apicid = summit_cpu_to_logical_apicid(cpu); + int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { printk("%s: Not a valid mask!\n", __func__); @@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = summit_cpu_to_logical_apicid(0); + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) @@ -528,8 +514,6 @@ struct apic apic_summit = { .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = summit_apicid_to_node, - .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, @@ -565,4 +549,7 @@ struct apic apic_summit = { .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, + + .x86_32_early_logical_apicid = summit_early_logical_apicid, + .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index cf69c59f4910..90949bbd566d 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8972f38c5ced..c7e6d6645bf4 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index bd16b58b8850..3c289281394c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = { .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 0e4f24c2a746..9079926a5b18 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -227,6 +227,7 @@ #include <linux/suspend.h> #include <linux/kthread.h> #include <linux/jiffies.h> +#include <linux/acpi.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -975,20 +976,10 @@ recalc: static void apm_power_off(void) { - unsigned char po_bios_call[] = { - 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ - 0x8e, 0xd0, /* movw ax,ss */ - 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ - 0xb8, 0x07, 0x53, /* movw $0x5307,ax */ - 0xbb, 0x01, 
0x00, /* movw $0x0001,bx */ - 0xb9, 0x03, 0x00, /* movw $0x0003,cx */ - 0xcd, 0x15 /* int $0x15 */ - }; - /* Some bioses don't like being called from CPU != 0 */ if (apm_info.realmode_power_off) { set_cpus_allowed_ptr(current, cpumask_of(0)); - machine_real_restart(po_bios_call, sizeof(po_bios_call)); + machine_real_restart(MRR_APM); } else { (void)set_system_power_state(APM_STATE_OFF); } @@ -2331,12 +2322,11 @@ static int __init apm_init(void) apm_info.disabled = 1; return -ENODEV; } - if (pm_flags & PM_ACPI) { + if (!acpi_disabled) { printk(KERN_NOTICE "apm: overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } - pm_flags |= PM_APM; /* * Set up the long jump entry point to the APM BIOS, which is called @@ -2428,7 +2418,6 @@ static void __exit apm_exit(void) kthread_stop(kapmd_task); kapmd_task = NULL; } - pm_flags &= ~PM_APM; } module_init(apm_init); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index cfa82c899f47..4f13fafc5264 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -1,5 +1,70 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ +#define COMPILE_OFFSETS + +#include <linux/crypto.h> +#include <linux/sched.h> +#include <linux/stddef.h> +#include <linux/hardirq.h> +#include <linux/suspend.h> +#include <linux/kbuild.h> +#include <asm/processor.h> +#include <asm/thread_info.h> +#include <asm/sigframe.h> +#include <asm/bootparam.h> +#include <asm/suspend.h> + +#ifdef CONFIG_XEN +#include <xen/interface/xen.h> +#endif + #ifdef CONFIG_X86_32 # include "asm-offsets_32.c" #else # include "asm-offsets_64.c" #endif + +void common(void) { + BLANK(); + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); + OFFSET(TI_preempt_count, thread_info, preempt_count); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); + + BLANK(); + OFFSET(pbe_address, pbe, address); + OFFSET(pbe_orig_address, pbe, orig_address); + OFFSET(pbe_next, pbe, next); + +#ifdef CONFIG_PARAVIRT + BLANK(); + OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); + OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); + OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); + OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); + OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); + OFFSET(PV_CPU_iret, pv_cpu_ops, iret); + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); +#endif + +#ifdef CONFIG_XEN + BLANK(); + OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); + OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); +#endif + + BLANK(); + OFFSET(BP_scratch, boot_params, scratch); + OFFSET(BP_loadflags, boot_params, hdr.loadflags); + OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); + OFFSET(BP_version, boot_params, hdr.version); + OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); +} diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 1a4088dda37a..c29d631af6fc 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -1,26 +1,4 @@ -/* - * Generate definitions needed by assembly language modules. 
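The asm-offsets consolidation above moves the constants shared by 32-bit and 64-bit entry code into one common() function; the kbuild OFFSET()/DEFINE() macros emit marker strings into generated assembly, which a sed step later turns into #defines. A rough user-space stand-in that simply prints the equivalent #define lines, where struct thread_info_sketch and GEN_OFFSET are invented for the example:

#include <stdio.h>
#include <stddef.h>

struct thread_info_sketch {
    unsigned long flags;
    int           preempt_count;
    unsigned long addr_limit;
};

/* In the kernel, OFFSET()/DEFINE() emit magic "->NAME value" strings into a
 * generated .s file; Kbuild post-processes them into asm-offsets.h. Here we
 * just print the #define lines that would result. */
#define GEN_OFFSET(sym, type, member) \
    printf("#define %-20s %zu /* offsetof(struct %s, %s) */\n", \
           #sym, offsetof(struct type, member), #type, #member)

int main(void)
{
    GEN_OFFSET(TI_flags, thread_info_sketch, flags);
    GEN_OFFSET(TI_preempt_count, thread_info_sketch, preempt_count);
    GEN_OFFSET(TI_addr_limit, thread_info_sketch, addr_limit);
    return 0;
}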
- * This code generates raw asm output which is post-processed - * to extract and format the required data. - */ - -#include <linux/crypto.h> -#include <linux/sched.h> -#include <linux/signal.h> -#include <linux/personality.h> -#include <linux/suspend.h> -#include <linux/kbuild.h> #include <asm/ucontext.h> -#include <asm/sigframe.h> -#include <asm/pgtable.h> -#include <asm/fixmap.h> -#include <asm/processor.h> -#include <asm/thread_info.h> -#include <asm/bootparam.h> -#include <asm/elf.h> -#include <asm/suspend.h> - -#include <xen/interface/xen.h> #include <linux/lguest.h> #include "../../../drivers/lguest/lg.h" @@ -51,21 +29,10 @@ void foo(void) OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); BLANK(); - OFFSET(TI_task, thread_info, task); - OFFSET(TI_exec_domain, thread_info, exec_domain); - OFFSET(TI_flags, thread_info, flags); - OFFSET(TI_status, thread_info, status); - OFFSET(TI_preempt_count, thread_info, preempt_count); - OFFSET(TI_addr_limit, thread_info, addr_limit); - OFFSET(TI_restart_block, thread_info, restart_block); OFFSET(TI_sysenter_return, thread_info, sysenter_return); OFFSET(TI_cpu, thread_info, cpu); BLANK(); - OFFSET(GDS_size, desc_ptr, size); - OFFSET(GDS_address, desc_ptr, address); - BLANK(); - OFFSET(PT_EBX, pt_regs, bx); OFFSET(PT_ECX, pt_regs, cx); OFFSET(PT_EDX, pt_regs, dx); @@ -85,42 +52,13 @@ void foo(void) OFFSET(PT_OLDSS, pt_regs, ss); BLANK(); - OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext); BLANK(); - OFFSET(pbe_address, pbe, address); - OFFSET(pbe_orig_address, pbe, orig_address); - OFFSET(pbe_next, pbe, next); - /* Offset from the sysenter stack to tss.sp0 */ DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - sizeof(struct tss_struct)); - DEFINE(PAGE_SIZE_asm, PAGE_SIZE); - DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); - DEFINE(THREAD_SIZE_asm, THREAD_SIZE); - - OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); - -#ifdef CONFIG_PARAVIRT - BLANK(); - OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); - OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); - OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); - OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); - OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); - OFFSET(PV_CPU_iret, pv_cpu_ops, iret); - OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); - OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); -#endif - -#ifdef CONFIG_XEN - BLANK(); - OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); - OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); -#endif - #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) BLANK(); OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); @@ -139,11 +77,4 @@ void foo(void) OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); #endif - - BLANK(); - OFFSET(BP_scratch, boot_params, scratch); - OFFSET(BP_loadflags, boot_params, hdr.loadflags); - OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); - OFFSET(BP_version, boot_params, hdr.version); - OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); } diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 4a6aeedcd965..e72a1194af22 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -1,27 +1,4 @@ -/* - * Generate definitions needed by assembly language modules. 
- * This code generates raw asm output which is post-processed to extract - * and format the required data. - */ -#define COMPILE_OFFSETS - -#include <linux/crypto.h> -#include <linux/sched.h> -#include <linux/stddef.h> -#include <linux/errno.h> -#include <linux/hardirq.h> -#include <linux/suspend.h> -#include <linux/kbuild.h> -#include <asm/processor.h> -#include <asm/segment.h> -#include <asm/thread_info.h> #include <asm/ia32.h> -#include <asm/bootparam.h> -#include <asm/suspend.h> - -#include <xen/interface/xen.h> - -#include <asm/sigframe.h> #define __NO_STUBS 1 #undef __SYSCALL @@ -33,41 +10,19 @@ static char syscalls[] = { int main(void) { -#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry)) - ENTRY(state); - ENTRY(flags); - ENTRY(pid); - BLANK(); -#undef ENTRY -#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry)) - ENTRY(flags); - ENTRY(addr_limit); - ENTRY(preempt_count); - ENTRY(status); -#ifdef CONFIG_IA32_EMULATION - ENTRY(sysenter_return); -#endif - BLANK(); -#undef ENTRY #ifdef CONFIG_PARAVIRT - BLANK(); - OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); - OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); - OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); - OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); - OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); - OFFSET(PV_CPU_iret, pv_cpu_ops, iret); OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); - OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); - OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); + BLANK(); #endif - #ifdef CONFIG_IA32_EMULATION -#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry)) + OFFSET(TI_sysenter_return, thread_info, sysenter_return); + BLANK(); + +#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry) ENTRY(ax); ENTRY(bx); ENTRY(cx); @@ -79,15 +34,12 @@ int main(void) ENTRY(ip); BLANK(); #undef ENTRY - DEFINE(IA32_RT_SIGFRAME_sigcontext, - offsetof (struct rt_sigframe_ia32, uc.uc_mcontext)); + + OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); BLANK(); #endif - DEFINE(pbe_address, offsetof(struct pbe, address)); - DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); - DEFINE(pbe_next, offsetof(struct pbe, next)); - BLANK(); -#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry)) + +#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) ENTRY(bx); ENTRY(bx); ENTRY(cx); @@ -107,7 +59,8 @@ int main(void) ENTRY(flags); BLANK(); #undef ENTRY -#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry)) + +#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry) ENTRY(cr0); ENTRY(cr2); ENTRY(cr3); @@ -115,26 +68,11 @@ int main(void) ENTRY(cr8); BLANK(); #undef ENTRY - DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); - BLANK(); - DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); - BLANK(); - DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); + OFFSET(TSS_ist, tss_struct, x86_tss.ist); BLANK(); - OFFSET(BP_scratch, boot_params, scratch); - OFFSET(BP_loadflags, boot_params, hdr.loadflags); - OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); - OFFSET(BP_version, boot_params, hdr.version); 
- OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); - BLANK(); - DEFINE(PAGE_SIZE_asm, PAGE_SIZE); -#ifdef CONFIG_XEN - BLANK(); - OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); - OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); -#undef ENTRY -#endif + DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); + return 0; } diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 13a389179514..452932d34730 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void) addr += size; } - printk(KERN_INFO "Scanning %d areas for low memory corruption\n", - num_scan_areas); + if (num_scan_areas) + printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); } @@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy) { check_for_bios_corruption(); schedule_delayed_work(&bios_check_work, - round_jiffies_relative(corruption_check_period*HZ)); + round_jiffies_relative(corruption_check_period*HZ)); } static int start_periodic_check_for_corruption(void) { - if (!memory_corruption_check || corruption_check_period == 0) + if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) return 0; printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb83c5a..3ecece0217ef 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) } #endif -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA +/* + * To workaround broken NUMA config. Read the comment in + * srat_detect_node(). + */ static int __cpuinit nearby_node(int apicid) { int i, node; for (i = apicid - 1; i >= 0; i--) { - node = apicid_to_node[i]; + node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { - node = apicid_to_node[i]; + node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } @@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid) #ifdef CONFIG_X86_HT static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) { - u32 nodes; + u32 nodes, cores_per_cu = 1; u8 node_id; int cpu = smp_processor_id(); @@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* get compute unit information */ smp_num_siblings = ((ebx >> 8) & 3) + 1; c->compute_unit_id = ebx & 0xff; + cores_per_cu += ((ebx >> 8) & 3); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; @@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* fixup multi-node processor information */ if (nodes > 1) { u32 cores_per_node; + u32 cus_per_node; set_cpu_cap(c, X86_FEATURE_AMD_DCM); cores_per_node = c->x86_max_cores / nodes; + cus_per_node = cores_per_node / cores_per_cu; /* store NodeID, use llc_shared_map to store sibling info */ per_cpu(cpu_llc_id, cpu) = node_id; - /* core id to be in range from 0 to (cores_per_node - 1) */ - c->cpu_core_id = c->cpu_core_id % cores_per_node; + /* core id has to be in the [0 .. 
cores_per_node - 1] range */ + c->cpu_core_id %= cores_per_node; + c->compute_unit_id %= cus_per_node; } } #endif @@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id); static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA int cpu = smp_processor_id(); int node; unsigned apicid = c->apicid; - node = per_cpu(cpu_llc_id, cpu); + node = numa_cpu_node(cpu); + if (node == NUMA_NO_NODE) + node = per_cpu(cpu_llc_id, cpu); - if (apicid_to_node[apicid] != NUMA_NO_NODE) - node = apicid_to_node[apicid]; if (!node_online(node)) { - /* Two possibilities here: - - The CPU is missing memory and no node was created. - In that case try picking one from a nearby CPU - - The APIC IDs differ from the HyperTransport node IDs - which the K8 northbridge parsing fills in. - Assume they are all increased by a constant offset, - but in the same order as the HT nodeids. - If that doesn't result in a usable node fall back to the - path for the previous case. */ - + /* + * Two possibilities here: + * + * - The CPU is missing memory and no node was created. In + * that case try picking one from a nearby CPU. + * + * - The APIC IDs differ from the HyperTransport node IDs + * which the K8 northbridge parsing fills in. Assume + * they are all increased by a constant offset, but in + * the same order as the HT nodeids. If that doesn't + * result in a usable node fall back to the path for the + * previous case. + * + * This workaround operates directly on the mapping between + * APIC ID and NUMA node, assuming certain relationship + * between APIC ID, HT node ID and NUMA topology. As going + * through CPU mapping may alter the outcome, directly + * access __apicid_to_node[]. + */ int ht_nodeid = c->initial_apicid; if (ht_nodeid >= 0 && - apicid_to_node[ht_nodeid] != NUMA_NO_NODE) - node = apicid_to_node[ht_nodeid]; + __apicid_to_node[ht_nodeid] != NUMA_NO_NODE) + node = __apicid_to_node[ht_nodeid]; /* Pick a nearby node */ if (!node_online(node)) node = nearby_node(apicid); @@ -594,6 +611,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } #endif + + /* As a rule processors have APIC timer running in deep C states */ + if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) + set_cpu_cap(c, X86_FEATURE_ARAT); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834396bd..e2ced0074a45 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -675,7 +675,7 @@ void __init early_cpu_init(void) const struct cpu_dev *const *cdev; int count = 0; -#ifdef PROCESSOR_SELECT +#ifdef CONFIG_PROCESSOR_SELECT printk(KERN_INFO "KERNEL supported cpus:\n"); #endif @@ -687,7 +687,7 @@ void __init early_cpu_init(void) cpu_devs[count] = cpudev; count++; -#ifdef PROCESSOR_SELECT +#ifdef CONFIG_PROCESSOR_SELECT { unsigned int j; @@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) select_idle_routine(c); -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif } diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4f6f679f2799..4a5a42b842ad 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c @@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) cmd_incomplete: iowrite16(0, &pcch_hdr->status); spin_unlock(&pcc_lock); - return -EINVAL; + return 0; } static int 
pcc_cpufreq_target(struct cpufreq_policy *policy, diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d16c2c53d6bf..df86bc8c859d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) +#ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); - int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; /* Don't do the funky fallback heuristics the AMD version employs for now. */ - node = apicid_to_node[apicid]; + node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index ec2c19a7b8ef..1ce1af2899df 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, struct _cache_attr { struct attribute attr; - ssize_t (*show)(struct _cpuid4_info *, char *); - ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); + ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); + ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, + unsigned int); }; #ifdef CONFIG_AMD_NB @@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ -show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \ +show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ + unsigned int cpu) \ { \ return show_cache_disable(this_leaf, buf, slot); \ } @@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ - const char *buf, size_t count) \ + const char *buf, size_t count, \ + unsigned int cpu) \ { \ return store_cache_disable(this_leaf, buf, count, slot); \ } @@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); +static ssize_t +show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) +{ + if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return -EINVAL; + + return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); +} + +static ssize_t +store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, + unsigned int cpu) +{ + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return -EINVAL; + + if (strict_strtoul(buf, 16, &val) < 0) + return -EINVAL; + + if (amd_set_subcaches(cpu, val)) + return -EINVAL; + + return count; +} + +static struct _cache_attr subcaches = + __ATTR(subcaches, 0644, show_subcaches, store_subcaches); + #else /* CONFIG_AMD_NB */ #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB */ @@ -532,9 +568,9 @@ static int __cpuinit cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { - union _cpuid4_leaf_eax eax; - union _cpuid4_leaf_ebx ebx; - union _cpuid4_leaf_ecx ecx; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; unsigned 
edx; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { @@ -732,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) struct cpuinfo_x86 *c = &cpu_data(cpu); if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { - for_each_cpu(i, c->llc_shared_map) { + for_each_cpu(i, cpu_llc_shared_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); - for_each_cpu(sibling, c->llc_shared_map) { + for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; set_bit(sibling, this_leaf->shared_cpu_map); @@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) #define show_one_plus(file_name, object, val) \ -static ssize_t show_##file_name \ - (struct _cpuid4_info *this_leaf, char *buf) \ +static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ + unsigned int cpu) \ { \ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ } @@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1); show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); -static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) +static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, + unsigned int cpu) { return sprintf(buf, "%luK\n", this_leaf->size / 1024); } @@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, return n; } -static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf) +static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, + unsigned int cpu) { return show_shared_cpu_map_func(leaf, 0, buf); } -static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf) +static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, + unsigned int cpu) { return show_shared_cpu_map_func(leaf, 1, buf); } -static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) +static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, + unsigned int cpu) { switch (this_leaf->eax.split.type) { case CACHE_TYPE_DATA: @@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + n += 1; + attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); if (attrs == NULL) return attrs = default_attrs; @@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) attrs[n++] = &cache_disable_1.attr; } + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + attrs[n++] = &subcaches.attr; + return attrs; } #endif @@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) ret = fattr->show ? fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), - buf) : + buf, this_leaf->cpu) : 0; return ret; } @@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, ret = fattr->store ? 
fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), - buf, count) : + buf, count, this_leaf->cpu) : 0; return ret; } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5bf2fac52aca..167f97b5596e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) int i, err = 0; struct threshold_bank *b = NULL; char name[32]; -#ifdef CONFIG_SMP - struct cpuinfo_x86 *c = &cpu_data(cpu); -#endif sprintf(name, "threshold_bank%i", bank); #ifdef CONFIG_SMP if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ - i = cpumask_first(c->llc_shared_map); + i = cpumask_first(cpu_llc_shared_mask(cpu)); /* first core not up yet */ if (cpu_data(i).cpu_core_id) @@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out; - cpumask_copy(b->cpus, c->llc_shared_map); + cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu)); per_cpu(threshold_banks, cpu)[bank] = b; goto out; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9d977a2ea693..26604188aa49 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -30,6 +30,7 @@ #include <asm/stacktrace.h> #include <asm/nmi.h> #include <asm/compat.h> +#include <asm/smp.h> #if 0 #undef wrmsrl @@ -93,6 +94,8 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; +struct intel_percore; + #define MAX_LBR_ENTRIES 16 struct cpu_hw_events { @@ -128,6 +131,13 @@ struct cpu_hw_events { struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; /* + * Intel percore register state. + * Coordinate shared resources between HT threads. + */ + int percore_used; /* Used by this CPU? */ + struct intel_percore *per_core; + + /* * AMD specific bits */ struct amd_nb *amd_nb; @@ -166,8 +176,10 @@ struct cpu_hw_events { /* * Constraint on the Event code + UMask */ -#define PEBS_EVENT_CONSTRAINT(c, n) \ +#define INTEL_UEVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) +#define PEBS_EVENT_CONSTRAINT(c, n) \ + INTEL_UEVENT_CONSTRAINT(c, n) #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) @@ -175,6 +187,28 @@ struct cpu_hw_events { #define for_each_event_constraint(e, c) \ for ((e) = (c); (e)->weight; (e)++) +/* + * Extra registers for specific events. + * Some events need large masks and require external MSRs. + * Define a mapping to these extra registers. 
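As the comment says, an event such as OFFCORE_RESPONSE only selects a counter; the detailed request/response mask lives in a separate MSR that has to be programmed before the event select is enabled (see the __x86_pmu_enable_event() change further down). A hedged user-space sketch of that ordering, where wrmsr_log() and struct hw_event_sketch are stand-ins and only the MSR numbers and the enable bit are the documented values:

#include <stdio.h>
#include <stdint.h>

#define ARCH_PERFMON_EVENTSEL_ENABLE (1ull << 22)
#define MSR_OFFCORE_RSP_0            0x1a6u   /* extra MSR paired with event 0xb7 on Nehalem */

/* stand-in for wrmsrl(); just logs the write order */
static void wrmsr_log(unsigned int msr, uint64_t val)
{
    printf("wrmsr %#x <- %#llx\n", msr, (unsigned long long)val);
}

struct hw_event_sketch {
    unsigned int config_base;   /* event-select MSR */
    uint64_t     config;        /* event code + umask */
    unsigned int extra_reg;     /* 0 if the event has no extra MSR */
    uint64_t     extra_config;  /* value for the extra MSR (from attr.config1) */
};

static void enable_event(const struct hw_event_sketch *hwc)
{
    if (hwc->extra_reg)
        wrmsr_log(hwc->extra_reg, hwc->extra_config);   /* program the mask first */
    wrmsr_log(hwc->config_base, hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

int main(void)
{
    struct hw_event_sketch ev = {
        .config_base  = 0x186,          /* IA32_PERFEVTSEL0 */
        .config       = 0x01b7,         /* OFFCORE_RESPONSE_0 */
        .extra_reg    = MSR_OFFCORE_RSP_0,
        .extra_config = 0x00ff,         /* request/response mask, illustrative */
    };
    enable_event(&ev);
    return 0;
}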
+ */ +struct extra_reg { + unsigned int event; + unsigned int msr; + u64 config_mask; + u64 valid_mask; +}; + +#define EVENT_EXTRA_REG(e, ms, m, vm) { \ + .event = (e), \ + .msr = (ms), \ + .config_mask = (m), \ + .valid_mask = (vm), \ + } +#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ + EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) +#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) + union perf_capabilities { struct { u64 lbr_format : 6; @@ -219,6 +253,7 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; + struct event_constraint *percore_constraints; void (*quirks)(void); int perfctr_second_write; @@ -247,6 +282,11 @@ struct x86_pmu { */ unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ int lbr_nr; /* hardware stack size */ + + /* + * Extra registers for events + */ + struct extra_reg *extra_regs; }; static struct x86_pmu x86_pmu __read_mostly; @@ -271,6 +311,10 @@ static u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; +static u64 __read_mostly hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; /* * Propagate event elapsed time into the generic event. @@ -298,7 +342,7 @@ x86_perf_event_update(struct perf_event *event) */ again: prev_raw_count = local64_read(&hwc->prev_count); - rdmsrl(hwc->event_base + idx, new_raw_count); + rdmsrl(hwc->event_base, new_raw_count); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) @@ -321,6 +365,49 @@ again: return new_raw_count; } +/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ +static inline int x86_pmu_addr_offset(int index) +{ + if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) + return index << 1; + return index; +} + +static inline unsigned int x86_pmu_config_addr(int index) +{ + return x86_pmu.eventsel + x86_pmu_addr_offset(index); +} + +static inline unsigned int x86_pmu_event_addr(int index) +{ + return x86_pmu.perfctr + x86_pmu_addr_offset(index); +} + +/* + * Find and validate any extra registers to set up. 
+ */ +static int x86_pmu_extra_regs(u64 config, struct perf_event *event) +{ + struct extra_reg *er; + + event->hw.extra_reg = 0; + event->hw.extra_config = 0; + + if (!x86_pmu.extra_regs) + return 0; + + for (er = x86_pmu.extra_regs; er->msr; er++) { + if (er->event != (config & er->config_mask)) + continue; + if (event->attr.config1 & ~er->valid_mask) + return -EINVAL; + event->hw.extra_reg = er->msr; + event->hw.extra_config = event->attr.config1; + break; + } + return 0; +} + static atomic_t active_events; static DEFINE_MUTEX(pmc_reserve_mutex); @@ -331,12 +418,12 @@ static bool reserve_pmc_hardware(void) int i; for (i = 0; i < x86_pmu.num_counters; i++) { - if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) + if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) goto perfctr_fail; } for (i = 0; i < x86_pmu.num_counters; i++) { - if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) + if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) goto eventsel_fail; } @@ -344,13 +431,13 @@ static bool reserve_pmc_hardware(void) eventsel_fail: for (i--; i >= 0; i--) - release_evntsel_nmi(x86_pmu.eventsel + i); + release_evntsel_nmi(x86_pmu_config_addr(i)); i = x86_pmu.num_counters; perfctr_fail: for (i--; i >= 0; i--) - release_perfctr_nmi(x86_pmu.perfctr + i); + release_perfctr_nmi(x86_pmu_event_addr(i)); return false; } @@ -360,8 +447,8 @@ static void release_pmc_hardware(void) int i; for (i = 0; i < x86_pmu.num_counters; i++) { - release_perfctr_nmi(x86_pmu.perfctr + i); - release_evntsel_nmi(x86_pmu.eventsel + i); + release_perfctr_nmi(x86_pmu_event_addr(i)); + release_evntsel_nmi(x86_pmu_config_addr(i)); } } @@ -382,7 +469,7 @@ static bool check_hw_exists(void) * complain and bail. */ for (i = 0; i < x86_pmu.num_counters; i++) { - reg = x86_pmu.eventsel + i; + reg = x86_pmu_config_addr(i); ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; @@ -407,8 +494,8 @@ static bool check_hw_exists(void) * that don't trap on the MSR access and always return 0s. 
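The x86_pmu_config_addr()/x86_pmu_event_addr() helpers introduced above exist because family 15h interleaves the event-select and counter MSRs with a stride of two, while the older layout is flat. A small stand-alone illustration, where the MSR base constants are the architectural values and everything else is a toy:

#include <stdio.h>

#define MSR_K7_EVNTSEL0   0xc0010000u  /* legacy AMD: CTL0..3 then CTR0..3, flat */
#define MSR_K7_PERFCTR0   0xc0010004u
#define MSR_F15H_PERF_CTL 0xc0010200u  /* family 15h: CTL0,CTR0,CTL1,CTR1,... interleaved */
#define MSR_F15H_PERF_CTR 0xc0010201u

static unsigned int addr_offset(int index, int perfctr_core)
{
    /* with the core counter extension each counter pair is two MSRs apart */
    return perfctr_core ? (unsigned int)index << 1 : (unsigned int)index;
}

static unsigned int config_addr(int index, int perfctr_core)
{
    return (perfctr_core ? MSR_F15H_PERF_CTL : MSR_K7_EVNTSEL0)
           + addr_offset(index, perfctr_core);
}

static unsigned int event_addr(int index, int perfctr_core)
{
    return (perfctr_core ? MSR_F15H_PERF_CTR : MSR_K7_PERFCTR0)
           + addr_offset(index, perfctr_core);
}

int main(void)
{
    for (int core_ext = 0; core_ext <= 1; core_ext++)
        for (int idx = 0; idx < 3; idx++)
            printf("%s idx %d: config MSR %#x, counter MSR %#x\n",
                   core_ext ? "fam15h" : "legacy", idx,
                   config_addr(idx, core_ext), event_addr(idx, core_ext));
    return 0;
}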
*/ val = 0xabcdUL; - ret = checking_wrmsrl(x86_pmu.perfctr, val); - ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new); + ret = checking_wrmsrl(x86_pmu_event_addr(0), val); + ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); if (ret || val != val_new) goto msr_fail; @@ -442,8 +529,9 @@ static inline int x86_pmu_initialized(void) } static inline int -set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) +set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) { + struct perf_event_attr *attr = &event->attr; unsigned int cache_type, cache_op, cache_result; u64 config, val; @@ -470,8 +558,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) return -EINVAL; hwc->config |= val; - - return 0; + attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result]; + return x86_pmu_extra_regs(val, event); } static int x86_setup_perfctr(struct perf_event *event) @@ -496,10 +584,10 @@ static int x86_setup_perfctr(struct perf_event *event) } if (attr->type == PERF_TYPE_RAW) - return 0; + return x86_pmu_extra_regs(event->attr.config, event); if (attr->type == PERF_TYPE_HW_CACHE) - return set_ext_hw_attr(hwc, attr); + return set_ext_hw_attr(hwc, event); if (attr->config >= x86_pmu.max_events) return -EINVAL; @@ -617,11 +705,11 @@ static void x86_pmu_disable_all(void) if (!test_bit(idx, cpuc->active_mask)) continue; - rdmsrl(x86_pmu.eventsel + idx, val); + rdmsrl(x86_pmu_config_addr(idx), val); if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) continue; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; - wrmsrl(x86_pmu.eventsel + idx, val); + wrmsrl(x86_pmu_config_addr(idx), val); } } @@ -642,21 +730,26 @@ static void x86_pmu_disable(struct pmu *pmu) x86_pmu.disable_all(); } +static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, + u64 enable_mask) +{ + if (hwc->extra_reg) + wrmsrl(hwc->extra_reg, hwc->extra_config); + wrmsrl(hwc->config_base, hwc->config | enable_mask); +} + static void x86_pmu_enable_all(int added) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { - struct perf_event *event = cpuc->events[idx]; - u64 val; + struct hw_perf_event *hwc = &cpuc->events[idx]->hw; if (!test_bit(idx, cpuc->active_mask)) continue; - val = event->hw.config; - val |= ARCH_PERFMON_EVENTSEL_ENABLE; - wrmsrl(x86_pmu.eventsel + idx, val); + __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } } @@ -821,15 +914,10 @@ static inline void x86_assign_hw_event(struct perf_event *event, hwc->event_base = 0; } else if (hwc->idx >= X86_PMC_IDX_FIXED) { hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - /* - * We set it so that event_base + idx in wrmsr/rdmsr maps to - * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: - */ - hwc->event_base = - MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; + hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0; } else { - hwc->config_base = x86_pmu.eventsel; - hwc->event_base = x86_pmu.perfctr; + hwc->config_base = x86_pmu_config_addr(hwc->idx); + hwc->event_base = x86_pmu_event_addr(hwc->idx); } } @@ -915,17 +1003,11 @@ static void x86_pmu_enable(struct pmu *pmu) x86_pmu.enable_all(added); } -static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, - u64 enable_mask) -{ - wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask); -} - static inline void x86_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base + hwc->idx, hwc->config); + wrmsrl(hwc->config_base, hwc->config); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); @@ -978,7 +1060,7 @@ x86_perf_event_set_period(struct perf_event *event) */ local64_set(&hwc->prev_count, (u64)-left); - wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask); + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Due to erratum on certan cpu we need @@ -986,7 +1068,7 @@ x86_perf_event_set_period(struct perf_event *event) * is updated properly */ if (x86_pmu.perfctr_second_write) { - wrmsrl(hwc->event_base + idx, + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); } @@ -1113,8 +1195,8 @@ void perf_event_print_debug(void) pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); - rdmsrl(x86_pmu.perfctr + idx, pmc_count); + rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); + rdmsrl(x86_pmu_event_addr(idx), pmc_count); prev_left = per_cpu(pmc_prev_left[idx], cpu); @@ -1389,7 +1471,7 @@ static void __init pmu_check_apic(void) pr_info("no hardware sampling interrupt available.\n"); } -int __init init_hw_perf_events(void) +static int __init init_hw_perf_events(void) { struct event_constraint *c; int err; @@ -1608,7 +1690,7 @@ out: return ret; } -int x86_pmu_event_init(struct perf_event *event) +static int x86_pmu_event_init(struct perf_event *event) { struct pmu *tmp; int err; diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 67e2202a6039..461f62bbd774 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event) /* * AMD64 events are detected based on their event codes. */ +static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) +{ + return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); +} + static inline int amd_is_nb_event(struct hw_perf_event *hwc) { return (hwc->config & 0xe0) == 0xe0; @@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = { .cpu_dead = amd_pmu_cpu_dead, }; +/* AMD Family 15h */ + +#define AMD_EVENT_TYPE_MASK 0x000000F0ULL + +#define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL +#define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL +#define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL +#define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL +#define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL +#define AMD_EVENT_EX_LS 0x000000C0ULL +#define AMD_EVENT_DE 0x000000D0ULL +#define AMD_EVENT_NB 0x000000E0ULL ... 
0x000000F0ULL + +/* + * AMD family 15h event code/PMC mappings: + * + * type = event_code & 0x0F0: + * + * 0x000 FP PERF_CTL[5:3] + * 0x010 FP PERF_CTL[5:3] + * 0x020 LS PERF_CTL[5:0] + * 0x030 LS PERF_CTL[5:0] + * 0x040 DC PERF_CTL[5:0] + * 0x050 DC PERF_CTL[5:0] + * 0x060 CU PERF_CTL[2:0] + * 0x070 CU PERF_CTL[2:0] + * 0x080 IC/DE PERF_CTL[2:0] + * 0x090 IC/DE PERF_CTL[2:0] + * 0x0A0 --- + * 0x0B0 --- + * 0x0C0 EX/LS PERF_CTL[5:0] + * 0x0D0 DE PERF_CTL[2:0] + * 0x0E0 NB NB_PERF_CTL[3:0] + * 0x0F0 NB NB_PERF_CTL[3:0] + * + * Exceptions: + * + * 0x003 FP PERF_CTL[3] + * 0x00B FP PERF_CTL[3] + * 0x00D FP PERF_CTL[3] + * 0x023 DE PERF_CTL[2:0] + * 0x02D LS PERF_CTL[3] + * 0x02E LS PERF_CTL[3,0] + * 0x043 CU PERF_CTL[2:0] + * 0x045 CU PERF_CTL[2:0] + * 0x046 CU PERF_CTL[2:0] + * 0x054 CU PERF_CTL[2:0] + * 0x055 CU PERF_CTL[2:0] + * 0x08F IC PERF_CTL[0] + * 0x187 DE PERF_CTL[0] + * 0x188 DE PERF_CTL[0] + * 0x0DB EX PERF_CTL[5:0] + * 0x0DC LS PERF_CTL[5:0] + * 0x0DD LS PERF_CTL[5:0] + * 0x0DE LS PERF_CTL[5:0] + * 0x0DF LS PERF_CTL[5:0] + * 0x1D6 EX PERF_CTL[5:0] + * 0x1D8 EX PERF_CTL[5:0] + */ + +static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); +static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); +static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); +static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); +static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); +static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); + +static struct event_constraint * +amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + unsigned int event_code = amd_get_event_code(&event->hw); + + switch (event_code & AMD_EVENT_TYPE_MASK) { + case AMD_EVENT_FP: + switch (event_code) { + case 0x003: + case 0x00B: + case 0x00D: + return &amd_f15_PMC3; + default: + return &amd_f15_PMC53; + } + case AMD_EVENT_LS: + case AMD_EVENT_DC: + case AMD_EVENT_EX_LS: + switch (event_code) { + case 0x023: + case 0x043: + case 0x045: + case 0x046: + case 0x054: + case 0x055: + return &amd_f15_PMC20; + case 0x02D: + return &amd_f15_PMC3; + case 0x02E: + return &amd_f15_PMC30; + default: + return &amd_f15_PMC50; + } + case AMD_EVENT_CU: + case AMD_EVENT_IC_DE: + case AMD_EVENT_DE: + switch (event_code) { + case 0x08F: + case 0x187: + case 0x188: + return &amd_f15_PMC0; + case 0x0DB ... 
0x0DF: + case 0x1D6: + case 0x1D8: + return &amd_f15_PMC50; + default: + return &amd_f15_PMC20; + } + case AMD_EVENT_NB: + /* not yet implemented */ + return &emptyconstraint; + default: + return &emptyconstraint; + } +} + +static __initconst const struct x86_pmu amd_pmu_f15h = { + .name = "AMD Family 15h", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .hw_config = amd_pmu_hw_config, + .schedule_events = x86_schedule_events, + .eventsel = MSR_F15H_PERF_CTL, + .perfctr = MSR_F15H_PERF_CTR, + .event_map = amd_pmu_event_map, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_counters = 6, + .cntval_bits = 48, + .cntval_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = amd_get_event_constraints_f15h, + /* nortbridge counters not yet implemented: */ +#if 0 + .put_event_constraints = amd_put_event_constraints, + + .cpu_prepare = amd_pmu_cpu_prepare, + .cpu_starting = amd_pmu_cpu_starting, + .cpu_dead = amd_pmu_cpu_dead, +#endif +}; + static __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ if (boot_cpu_data.x86 < 6) return -ENODEV; - x86_pmu = amd_pmu; + /* + * If core performance counter extensions exists, it must be + * family 15h, otherwise fail. See x86_pmu_addr_offset(). + */ + switch (boot_cpu_data.x86) { + case 0x15: + if (!cpu_has_perfctr_core) + return -ENODEV; + x86_pmu = amd_pmu_f15h; + break; + default: + if (cpu_has_perfctr_core) + return -ENODEV; + x86_pmu = amd_pmu; + break; + } /* Events are common for all AMDs */ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 008835c1d79c..8fc2b2cee1da 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1,5 +1,27 @@ #ifdef CONFIG_CPU_SUP_INTEL +#define MAX_EXTRA_REGS 2 + +/* + * Per register state. + */ +struct er_account { + int ref; /* reference count */ + unsigned int extra_reg; /* extra MSR number */ + u64 extra_config; /* extra MSR config */ +}; + +/* + * Per core state + * This used to coordinate shared registers for HT threads. + */ +struct intel_percore { + raw_spinlock_t lock; /* protect structure */ + struct er_account regs[MAX_EXTRA_REGS]; + int refcnt; /* number of threads */ + unsigned core_id; +}; + /* * Intel PerfMon, used on Core and later. 
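amd_get_event_constraints_f15h() above encodes the table from the comment: the upper nibble of the event code picks a counter group, with a handful of per-event exceptions. A compact stand-alone rendition of the default cases, keeping only the FP exceptions and leaving out the other exceptions and the NB group:

#include <stdio.h>

#define EVENT_TYPE_MASK 0x0F0u

/* returns the bitmask of counters that may host the event (toy version of
 * the kernel's struct event_constraint) */
static unsigned int f15h_counter_mask(unsigned int event_code)
{
    switch (event_code & EVENT_TYPE_MASK) {
    case 0x000: case 0x010:                 /* FP */
        switch (event_code) {
        case 0x003: case 0x00B: case 0x00D:
            return 0x08;                    /* PERF_CTL[3] only */
        default:
            return 0x38;                    /* PERF_CTL[5:3] */
        }
    case 0x020: case 0x030:                 /* LS */
    case 0x040: case 0x050:                 /* DC */
    case 0x0C0:                             /* EX/LS */
        return 0x3F;                        /* PERF_CTL[5:0], exceptions elided */
    case 0x060: case 0x070:                 /* CU */
    case 0x080: case 0x090:                 /* IC/DE */
    case 0x0D0:                             /* DE */
        return 0x07;                        /* PERF_CTL[2:0], exceptions elided */
    default:
        return 0;                           /* NB and unknown: not handled here */
    }
}

int main(void)
{
    unsigned int codes[] = { 0x003, 0x026, 0x076, 0x0C0, 0x0E0 };

    for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
        printf("event %#05x -> counter mask %#04x\n",
               codes[i], f15h_counter_mask(codes[i]));
    return 0;
}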
*/ @@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] = EVENT_CONSTRAINT_END }; +static struct extra_reg intel_nehalem_extra_regs[] = +{ + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), + EVENT_EXTRA_END +}; + +static struct event_constraint intel_nehalem_percore_constraints[] = +{ + INTEL_EVENT_CONSTRAINT(0xb7, 0), + EVENT_CONSTRAINT_END +}; + static struct event_constraint intel_westmere_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ @@ -76,6 +110,33 @@ static struct event_constraint intel_westmere_event_constraints[] = EVENT_CONSTRAINT_END }; +static struct event_constraint intel_snb_event_constraints[] = +{ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ + INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ + INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ + INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ + INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ + EVENT_CONSTRAINT_END +}; + +static struct extra_reg intel_westmere_extra_regs[] = +{ + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), + EVENT_EXTRA_END +}; + +static struct event_constraint intel_westmere_percore_constraints[] = +{ + INTEL_EVENT_CONSTRAINT(0xb7, 0), + INTEL_EVENT_CONSTRAINT(0xbb, 0), + EVENT_CONSTRAINT_END +}; + static struct event_constraint intel_gen_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ @@ -89,6 +150,106 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } +static __initconst const u64 snb_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */ + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */ + [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */ + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(LL ) ] = { + /* + * TBD: Need Off-core Response Performance Monitoring support + */ + [ C(OP_READ) ] = { + /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, + }, + [ C(OP_WRITE) ] = { + /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, + }, + [ C(OP_PREFETCH) ] = { + /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ + [ C(RESULT_MISS) ] = 0x0108, /* 
DTLB_LOAD_MISSES.CAUSES_A_WALK */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ + [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */ + [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + static __initconst const u64 westmere_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -124,16 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids }, [ C(LL ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ - [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ + /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, + /* + * Use RFO, not WRITEBACK, because a write miss would typically occur + * on RFO. + */ [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ - [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ + /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01bb, + /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ - [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ + /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, }, [ C(DTLB) ] = { @@ -180,6 +351,39 @@ static __initconst const u64 westmere_hw_cache_event_ids }, }; +/* + * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 + */ + +#define DMND_DATA_RD (1 << 0) +#define DMND_RFO (1 << 1) +#define DMND_WB (1 << 3) +#define PF_DATA_RD (1 << 4) +#define PF_DATA_RFO (1 << 5) +#define RESP_UNCORE_HIT (1 << 8) +#define RESP_MISS (0xf600) /* non uncore hit */ + +static __initconst const u64 nehalem_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, + }, + } +}; + static __initconst const u64 nehalem_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -215,16 +419,26 @@ static __initconst const u64 nehalem_hw_cache_event_ids }, [ C(LL ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ - [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ + /* 
OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, + /* + * Use RFO, not WRITEBACK, because a write miss would typically occur + * on RFO. + */ [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ - [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ - [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, }, [ C(DTLB) ] = { @@ -691,8 +905,8 @@ static void intel_pmu_reset(void) printk("clearing PMU state on CPU#%d\n", smp_processor_id()); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); - checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); + checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); + checking_wrmsrl(x86_pmu_event_addr(idx), 0ull); } for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); @@ -794,6 +1008,67 @@ intel_bts_constraints(struct perf_event *event) } static struct event_constraint * +intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; + struct event_constraint *c; + struct intel_percore *pc; + struct er_account *era; + int i; + int free_slot; + int found; + + if (!x86_pmu.percore_constraints || hwc->extra_alloc) + return NULL; + + for (c = x86_pmu.percore_constraints; c->cmask; c++) { + if (e != c->code) + continue; + + /* + * Allocate resource per core. 
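The allocation that follows keeps a small per-core table of extra-register slots: a second hyper-thread may reuse a slot only if it programs the identical value, otherwise the event gets an empty constraint. A toy single-threaded model of that bookkeeping, where claim_extra_reg(), er_slot and the MSR number are illustrative and the real code additionally takes the per-core spinlock:

#include <stdio.h>

#define MAX_EXTRA 2

struct er_slot {
    int                ref;     /* how many events use this slot */
    unsigned int       msr;     /* which extra MSR it guards */
    unsigned long long config;  /* value programmed into that MSR */
};

static struct er_slot slots[MAX_EXTRA];  /* one such table per physical core */

/* returns 1 if (msr, config) was granted, 0 on conflict or if the table is full */
static int claim_extra_reg(unsigned int msr, unsigned long long config)
{
    int free_slot = -1;

    for (int i = 0; i < MAX_EXTRA; i++) {
        struct er_slot *s = &slots[i];

        if (s->ref > 0 && s->msr == msr) {
            if (s->config != config)
                return 0;        /* sibling wants a different value: conflict */
            s->ref++;            /* identical value: share the setting */
            return 1;
        }
        if (s->ref == 0 && free_slot < 0)
            free_slot = i;
    }
    if (free_slot < 0)
        return 0;

    slots[free_slot].ref    = 1;
    slots[free_slot].msr    = msr;
    slots[free_slot].config = config;
    return 1;
}

int main(void)
{
    printf("%d\n", claim_extra_reg(0x1a6, 0x00ff));  /* granted: first user */
    printf("%d\n", claim_extra_reg(0x1a6, 0x00ff));  /* granted: same value, shared */
    printf("%d\n", claim_extra_reg(0x1a6, 0xff00));  /* refused: conflicting value */
    return 0;
}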
+ */ + pc = cpuc->per_core; + if (!pc) + break; + c = &emptyconstraint; + raw_spin_lock(&pc->lock); + free_slot = -1; + found = 0; + for (i = 0; i < MAX_EXTRA_REGS; i++) { + era = &pc->regs[i]; + if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { + /* Allow sharing same config */ + if (hwc->extra_config == era->extra_config) { + era->ref++; + cpuc->percore_used = 1; + hwc->extra_alloc = 1; + c = NULL; + } + /* else conflict */ + found = 1; + break; + } else if (era->ref == 0 && free_slot == -1) + free_slot = i; + } + if (!found && free_slot != -1) { + era = &pc->regs[free_slot]; + era->ref = 1; + era->extra_reg = hwc->extra_reg; + era->extra_config = hwc->extra_config; + cpuc->percore_used = 1; + hwc->extra_alloc = 1; + c = NULL; + } + raw_spin_unlock(&pc->lock); + return c; + } + + return NULL; +} + +static struct event_constraint * intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { struct event_constraint *c; @@ -806,9 +1081,51 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event if (c) return c; + c = intel_percore_constraints(cpuc, event); + if (c) + return c; + return x86_get_event_constraints(cpuc, event); } +static void intel_put_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct extra_reg *er; + struct intel_percore *pc; + struct er_account *era; + struct hw_perf_event *hwc = &event->hw; + int i, allref; + + if (!cpuc->percore_used) + return; + + for (er = x86_pmu.extra_regs; er->msr; er++) { + if (er->event != (hwc->config & er->config_mask)) + continue; + + pc = cpuc->per_core; + raw_spin_lock(&pc->lock); + for (i = 0; i < MAX_EXTRA_REGS; i++) { + era = &pc->regs[i]; + if (era->ref > 0 && + era->extra_config == hwc->extra_config && + era->extra_reg == er->msr) { + era->ref--; + hwc->extra_alloc = 0; + break; + } + } + allref = 0; + for (i = 0; i < MAX_EXTRA_REGS; i++) + allref += pc->regs[i].ref; + if (allref == 0) + cpuc->percore_used = 0; + raw_spin_unlock(&pc->lock); + break; + } +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -880,20 +1197,67 @@ static __initconst const struct x86_pmu core_pmu = { */ .max_period = (1ULL << 31) - 1, .get_event_constraints = intel_get_event_constraints, + .put_event_constraints = intel_put_event_constraints, .event_constraints = intel_core_event_constraints, }; +static int intel_pmu_cpu_prepare(int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + + if (!cpu_has_ht_siblings()) + return NOTIFY_OK; + + cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), + GFP_KERNEL, cpu_to_node(cpu)); + if (!cpuc->per_core) + return NOTIFY_BAD; + + raw_spin_lock_init(&cpuc->per_core->lock); + cpuc->per_core->core_id = -1; + return NOTIFY_OK; +} + static void intel_pmu_cpu_starting(int cpu) { + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + int core_id = topology_core_id(cpu); + int i; + init_debug_store_on_cpu(cpu); /* * Deal with CPUs that don't clear their LBRs on power-up. 
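intel_pmu_cpu_prepare() above allocates one intel_percore per CPU, and the cpu-starting path that follows then collapses the copies so hyper-thread siblings end up sharing a single refcounted object, which the last sibling frees in intel_pmu_cpu_dying(). A toy model of that hand-over, where NR_CPUS, core_of[] and struct percore are invented and the real code walks topology_thread_cpumask() instead of all CPUs:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct percore { int core_id; int refcnt; };

static struct percore *per_cpu_pc[NR_CPUS];
static const int core_of[NR_CPUS] = { 0, 0, 1, 1 };   /* cpu -> physical core */

static void cpu_prepare(int cpu)
{
    per_cpu_pc[cpu] = calloc(1, sizeof(struct percore));
    per_cpu_pc[cpu]->core_id = -1;                     /* not claimed yet */
}

static void cpu_starting(int cpu)
{
    int core_id = core_of[cpu];

    /* if a sibling already owns a percore for this core, drop ours and share it */
    for (int i = 0; i < NR_CPUS; i++) {
        struct percore *pc = per_cpu_pc[i];

        if (i != cpu && pc && pc->core_id == core_id) {
            free(per_cpu_pc[cpu]);
            per_cpu_pc[cpu] = pc;
            break;
        }
    }
    per_cpu_pc[cpu]->core_id = core_id;
    per_cpu_pc[cpu]->refcnt++;
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) cpu_prepare(cpu);
    for (int cpu = 0; cpu < NR_CPUS; cpu++) cpu_starting(cpu);

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %d -> percore %p (core %d, refcnt %d)\n",
               cpu, (void *)per_cpu_pc[cpu], per_cpu_pc[cpu]->core_id,
               per_cpu_pc[cpu]->refcnt);
    return 0;
}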
*/ intel_pmu_lbr_reset(); + + if (!cpu_has_ht_siblings()) + return; + + for_each_cpu(i, topology_thread_cpumask(cpu)) { + struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; + + if (pc && pc->core_id == core_id) { + kfree(cpuc->per_core); + cpuc->per_core = pc; + break; + } + } + + cpuc->per_core->core_id = core_id; + cpuc->per_core->refcnt++; } static void intel_pmu_cpu_dying(int cpu) { + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct intel_percore *pc = cpuc->per_core; + + if (pc) { + if (pc->core_id == -1 || --pc->refcnt == 0) + kfree(pc); + cpuc->per_core = NULL; + } + fini_debug_store_on_cpu(cpu); } @@ -918,7 +1282,9 @@ static __initconst const struct x86_pmu intel_pmu = { */ .max_period = (1ULL << 31) - 1, .get_event_constraints = intel_get_event_constraints, + .put_event_constraints = intel_put_event_constraints, + .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, }; @@ -1024,6 +1390,7 @@ static __init int intel_pmu_init(void) intel_pmu_lbr_init_core(); x86_pmu.event_constraints = intel_core2_event_constraints; + x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; pr_cont("Core2 events, "); break; @@ -1032,11 +1399,16 @@ static __init int intel_pmu_init(void) case 46: /* 45 nm nehalem-ex, "Beckton" */ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); intel_pmu_lbr_init_nhm(); x86_pmu.event_constraints = intel_nehalem_event_constraints; + x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; + x86_pmu.percore_constraints = intel_nehalem_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; + x86_pmu.extra_regs = intel_nehalem_extra_regs; pr_cont("Nehalem events, "); break; @@ -1047,6 +1419,7 @@ static __init int intel_pmu_init(void) intel_pmu_lbr_init_atom(); x86_pmu.event_constraints = intel_gen_event_constraints; + x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; pr_cont("Atom events, "); break; @@ -1054,14 +1427,30 @@ static __init int intel_pmu_init(void) case 44: /* 32 nm nehalem, "Gulftown" */ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); intel_pmu_lbr_init_nhm(); x86_pmu.event_constraints = intel_westmere_event_constraints; + x86_pmu.percore_constraints = intel_westmere_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; + x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; + x86_pmu.extra_regs = intel_westmere_extra_regs; pr_cont("Westmere events, "); break; + case 42: /* SandyBridge */ + memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + intel_pmu_lbr_init_nhm(); + + x86_pmu.event_constraints = intel_snb_event_constraints; + x86_pmu.pebs_constraints = intel_snb_pebs_events; + pr_cont("SandyBridge events, "); + break; + default: /* * default constraints for v2 and up diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b7dcd9f2b8a0..b95c66ae4a2a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -361,30 +361,88 @@ static int intel_pmu_drain_bts_buffer(void) /* * PEBS */ - -static struct event_constraint intel_core_pebs_events[] = { - PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */ +static struct 
event_constraint intel_core2_pebs_event_constraints[] = { + PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ - PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */ - PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ - PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */ - PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ - PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */ + INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_atom_pebs_event_constraints[] = { + PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ + PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ + INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ EVENT_CONSTRAINT_END }; -static struct event_constraint intel_nehalem_pebs_events[] = { - PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */ - PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */ - PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */ - PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */ - PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */ - PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ - PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */ - PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ - PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */ +static struct event_constraint intel_nehalem_pebs_event_constraints[] = { + INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ + PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ + INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ + INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ + INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ + PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ + INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_westmere_pebs_event_constraints[] = { + INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ + PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ + INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ + + INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ + PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ + INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_snb_pebs_events[] = { + PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* 
UOPS_RETIRED.RETIRE_SLOTS */ + PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */ + PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */ + PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */ + PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */ + PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */ + PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */ + PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */ + PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */ + PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ + PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */ + PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */ + PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */ + PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ + PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */ + PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ + PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ + PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ + PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ + PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ + PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ + PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ + PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ + PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */ + PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */ + PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */ + PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */ + PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */ + PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */ + PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */ + PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */ + PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ EVENT_CONSTRAINT_END }; @@ -695,20 +753,17 @@ static void intel_ds_init(void) printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; - x86_pmu.pebs_constraints = intel_core_pebs_events; break; case 1: printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; - x86_pmu.pebs_constraints = intel_nehalem_pebs_events; break; default: printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; - break; } } } diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ff751a9f182b..3769ac822f96 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -764,9 +764,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) u64 v; /* an official way for overflow indication */ - rdmsrl(hwc->config_base + hwc->idx, v); + rdmsrl(hwc->config_base, v); if (v & P4_CCCR_OVF) { - wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF); + wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); return 1; } @@ -815,7 +815,7 @@ static inline void 
p4_pmu_disable_event(struct perf_event *event) * state we need to clear P4_CCCR_OVF, otherwise interrupt get * asserted again and again */ - (void)checking_wrmsrl(hwc->config_base + hwc->idx, + (void)checking_wrmsrl(hwc->config_base, (u64)(p4_config_unpack_cccr(hwc->config)) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); } @@ -885,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event) p4_pmu_enable_pebs(hwc->config); (void)checking_wrmsrl(escr_addr, escr_conf); - (void)checking_wrmsrl(hwc->config_base + hwc->idx, + (void)checking_wrmsrl(hwc->config_base, (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); } diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 34ba07be2cda..20c097e33860 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event) if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); + (void)checking_wrmsrl(hwc->config_base, val); } static void p6_pmu_enable_event(struct perf_event *event) @@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event) if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); + (void)checking_wrmsrl(hwc->config_base, val); } static __initconst const struct x86_pmu p6_pmu = { diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index d5a236615501..966512b2cacf 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) /* returns the bit offset of the performance counter register */ switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: + if (msr >= MSR_F15H_PERF_CTR) + return (msr - MSR_F15H_PERF_CTR) >> 1; return msr - MSR_K7_PERFCTR0; case X86_VENDOR_INTEL: if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) @@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) /* returns the bit offset of the event selection register */ switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: + if (msr >= MSR_F15H_PERF_CTL) + return (msr - MSR_F15H_PERF_CTL) >> 1; return msr - MSR_K7_EVNTSEL0; case X86_VENDOR_INTEL: if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c new file mode 100644 index 000000000000..7a8cebc9ff29 --- /dev/null +++ b/arch/x86/kernel/devicetree.c @@ -0,0 +1,441 @@ +/* + * Architecture specific OF callbacks. 
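The perfctr-watchdog hunk above teaches the MSR-to-bit helpers about family 15h, whose event-select and counter MSRs interleave (CTL0, CTR0, CTL1, CTR1, ...) rather than forming the two contiguous K7-style banks, so the offset from the base has to be halved to recover the counter index. A small demonstration of that arithmetic; the numeric bases follow the documented family-15h layout but are hard-coded here purely for the example:

#include <stdio.h>
#include <stdint.h>

#define MSR_F15H_PERF_CTR	0xc0010201u	/* CTR0; CTR1 is +2, CTR2 is +4, ... */
#define MSR_K7_PERFCTR0		0xc0010004u	/* legacy contiguous counter bank */

static unsigned int perfctr_msr_to_bit(uint32_t msr)
{
	if (msr >= MSR_F15H_PERF_CTR)		/* interleaved bank: halve the offset */
		return (msr - MSR_F15H_PERF_CTR) >> 1;
	return msr - MSR_K7_PERFCTR0;
}

int main(void)
{
	printf("%u\n", perfctr_msr_to_bit(MSR_F15H_PERF_CTR + 4));	/* counter 2 */
	printf("%u\n", perfctr_msr_to_bit(MSR_K7_PERFCTR0 + 2));	/* counter 2 */
	return 0;
}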
+ */ +#include <linux/bootmem.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/of_pci.h> + +#include <asm/hpet.h> +#include <asm/irq_controller.h> +#include <asm/apic.h> +#include <asm/pci_x86.h> + +__initdata u64 initial_dtb; +char __initdata cmd_line[COMMAND_LINE_SIZE]; +static LIST_HEAD(irq_domains); +static DEFINE_RAW_SPINLOCK(big_irq_lock); + +int __initdata of_ioapic; + +#ifdef CONFIG_X86_IO_APIC +static void add_interrupt_host(struct irq_domain *ih) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&big_irq_lock, flags); + list_add(&ih->l, &irq_domains); + raw_spin_unlock_irqrestore(&big_irq_lock, flags); +} +#endif + +static struct irq_domain *get_ih_from_node(struct device_node *controller) +{ + struct irq_domain *ih, *found = NULL; + unsigned long flags; + + raw_spin_lock_irqsave(&big_irq_lock, flags); + list_for_each_entry(ih, &irq_domains, l) { + if (ih->controller == controller) { + found = ih; + break; + } + } + raw_spin_unlock_irqrestore(&big_irq_lock, flags); + return found; +} + +unsigned int irq_create_of_mapping(struct device_node *controller, + const u32 *intspec, unsigned int intsize) +{ + struct irq_domain *ih; + u32 virq, type; + int ret; + + ih = get_ih_from_node(controller); + if (!ih) + return 0; + ret = ih->xlate(ih, intspec, intsize, &virq, &type); + if (ret) + return ret; + if (type == IRQ_TYPE_NONE) + return virq; + /* set the mask if it is different from current */ + if (type == (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) + set_irq_type(virq, type); + return virq; +} +EXPORT_SYMBOL_GPL(irq_create_of_mapping); + +unsigned long pci_address_to_pio(phys_addr_t address) +{ + /* + * The ioport address can be directly used by inX / outX + */ + BUG_ON(address >= (1 << 16)); + return (unsigned long)address; +} +EXPORT_SYMBOL_GPL(pci_address_to_pio); + +void __init early_init_dt_scan_chosen_arch(unsigned long node) +{ + BUG(); +} + +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + BUG(); +} + +void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); +} + +void __init add_dtb(u64 data) +{ + initial_dtb = data + offsetof(struct setup_data, data); +} + +/* + * CE4100 ids. Will be moved to machine_device_initcall() once we have it. 
+ */ +static struct of_device_id __initdata ce4100_ids[] = { + { .compatible = "intel,ce4100-cp", }, + { .compatible = "isa", }, + { .compatible = "pci", }, + {}, +}; + +static int __init add_bus_probe(void) +{ + if (!of_have_populated_dt()) + return 0; + + return of_platform_bus_probe(NULL, ce4100_ids, NULL); +} +module_init(add_bus_probe); + +#ifdef CONFIG_PCI +static int x86_of_pci_irq_enable(struct pci_dev *dev) +{ + struct of_irq oirq; + u32 virq; + int ret; + u8 pin; + + ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (ret) + return ret; + if (!pin) + return 0; + + ret = of_irq_map_pci(dev, &oirq); + if (ret) + return ret; + + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, + oirq.size); + if (virq == 0) + return -EINVAL; + dev->irq = virq; + return 0; +} + +static void x86_of_pci_irq_disable(struct pci_dev *dev) +{ +} + +void __cpuinit x86_of_pci_init(void) +{ + struct device_node *np; + + pcibios_enable_irq = x86_of_pci_irq_enable; + pcibios_disable_irq = x86_of_pci_irq_disable; + + for_each_node_by_type(np, "pci") { + const void *prop; + struct pci_bus *bus; + unsigned int bus_min; + struct device_node *child; + + prop = of_get_property(np, "bus-range", NULL); + if (!prop) + continue; + bus_min = be32_to_cpup(prop); + + bus = pci_find_bus(0, bus_min); + if (!bus) { + printk(KERN_ERR "Can't find a node for bus %s.\n", + np->full_name); + continue; + } + + if (bus->self) + bus->self->dev.of_node = np; + else + bus->dev.of_node = np; + + for_each_child_of_node(np, child) { + struct pci_dev *dev; + u32 devfn; + + prop = of_get_property(child, "reg", NULL); + if (!prop) + continue; + + devfn = (be32_to_cpup(prop) >> 8) & 0xff; + dev = pci_get_slot(bus, devfn); + if (!dev) + continue; + dev->dev.of_node = child; + pci_dev_put(dev); + } + } +} +#endif + +static void __init dtb_setup_hpet(void) +{ +#ifdef CONFIG_HPET_TIMER + struct device_node *dn; + struct resource r; + int ret; + + dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet"); + if (!dn) + return; + ret = of_address_to_resource(dn, 0, &r); + if (ret) { + WARN_ON(1); + return; + } + hpet_address = r.start; +#endif +} + +static void __init dtb_lapic_setup(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + struct device_node *dn; + struct resource r; + int ret; + + dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic"); + if (!dn) + return; + + ret = of_address_to_resource(dn, 0, &r); + if (WARN_ON(ret)) + return; + + /* Did the boot loader setup the local APIC ? 
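x86_of_pci_init() above glues device-tree nodes onto PCI devices by decoding the first cell of each child's "reg" property and keeping only devfn ((cell >> 8) & 0xff); in the standard OF PCI binding that cell packs bus, device and function. A quick decode of that layout; the example value is made up:

#include <stdio.h>
#include <stdint.h>

/* First "reg" cell (phys.hi): bits 16-23 = bus, 11-15 = device,
 * 8-10 = function, 0-7 = register offset. */
static void decode_pci_reg(uint32_t phys_hi)
{
	unsigned int bus   = (phys_hi >> 16) & 0xff;
	unsigned int devfn = (phys_hi >>  8) & 0xff;	/* what the hunk extracts */

	printf("bus %02x slot %02x func %u (devfn 0x%02x)\n",
	       bus, devfn >> 3, devfn & 7, devfn);
}

int main(void)
{
	decode_pci_reg(0x0000a800);	/* fabricated: bus 0, device 0x15, function 0 */
	return 0;
}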
*/ + if (!cpu_has_apic) { + if (apic_force_enable(r.start)) + return; + } + smp_found_config = 1; + pic_mode = 1; + register_lapic_address(r.start); + generic_processor_info(boot_cpu_physical_apicid, + GET_APIC_VERSION(apic_read(APIC_LVR))); +#endif +} + +#ifdef CONFIG_X86_IO_APIC +static unsigned int ioapic_id; + +static void __init dtb_add_ioapic(struct device_node *dn) +{ + struct resource r; + int ret; + + ret = of_address_to_resource(dn, 0, &r); + if (ret) { + printk(KERN_ERR "Can't obtain address from node %s.\n", + dn->full_name); + return; + } + mp_register_ioapic(++ioapic_id, r.start, gsi_top); +} + +static void __init dtb_ioapic_setup(void) +{ + struct device_node *dn; + + for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic") + dtb_add_ioapic(dn); + + if (nr_ioapics) { + of_ioapic = 1; + return; + } + printk(KERN_ERR "Error: No information about IO-APIC in OF.\n"); +} +#else +static void __init dtb_ioapic_setup(void) {} +#endif + +static void __init dtb_apic_setup(void) +{ + dtb_lapic_setup(); + dtb_ioapic_setup(); +} + +#ifdef CONFIG_OF_FLATTREE +static void __init x86_flattree_get_config(void) +{ + u32 size, map_len; + void *new_dtb; + + if (!initial_dtb) + return; + + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), + (u64)sizeof(struct boot_param_header)); + + initial_boot_params = early_memremap(initial_dtb, map_len); + size = be32_to_cpu(initial_boot_params->totalsize); + if (map_len < size) { + early_iounmap(initial_boot_params, map_len); + initial_boot_params = early_memremap(initial_dtb, size); + map_len = size; + } + + new_dtb = alloc_bootmem(size); + memcpy(new_dtb, initial_boot_params, size); + early_iounmap(initial_boot_params, map_len); + + initial_boot_params = new_dtb; + + /* root level address cells */ + of_scan_flat_dt(early_init_dt_scan_root, NULL); + + unflatten_device_tree(); +} +#else +static inline void x86_flattree_get_config(void) { } +#endif + +void __init x86_dtb_init(void) +{ + x86_flattree_get_config(); + + if (!of_have_populated_dt()) + return; + + dtb_setup_hpet(); + dtb_apic_setup(); +} + +#ifdef CONFIG_X86_IO_APIC + +struct of_ioapic_type { + u32 out_type; + u32 trigger; + u32 polarity; +}; + +static struct of_ioapic_type of_ioapic_type[] = +{ + { + .out_type = IRQ_TYPE_EDGE_RISING, + .trigger = IOAPIC_EDGE, + .polarity = 1, + }, + { + .out_type = IRQ_TYPE_LEVEL_LOW, + .trigger = IOAPIC_LEVEL, + .polarity = 0, + }, + { + .out_type = IRQ_TYPE_LEVEL_HIGH, + .trigger = IOAPIC_LEVEL, + .polarity = 1, + }, + { + .out_type = IRQ_TYPE_EDGE_FALLING, + .trigger = IOAPIC_EDGE, + .polarity = 0, + }, +}; + +static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, + u32 *out_hwirq, u32 *out_type) +{ + struct io_apic_irq_attr attr; + struct of_ioapic_type *it; + u32 line, idx, type; + + if (intsize < 2) + return -EINVAL; + + line = *intspec; + idx = (u32) id->priv; + *out_hwirq = line + mp_gsi_routing[idx].gsi_base; + + intspec++; + type = *intspec; + + if (type >= ARRAY_SIZE(of_ioapic_type)) + return -EINVAL; + + it = of_ioapic_type + type; + *out_type = it->out_type; + + set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); + + return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); +} + +static void __init ioapic_add_ofnode(struct device_node *np) +{ + struct resource r; + int i, ret; + + ret = of_address_to_resource(np, 0, &r); + if (ret) { + printk(KERN_ERR "Failed to obtain address for %s\n", + np->full_name); + return; + } + + for (i = 0; i < nr_ioapics; i++) { + if (r.start == mp_ioapics[i].apicaddr) 
{ + struct irq_domain *id; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + BUG_ON(!id); + id->controller = np; + id->xlate = ioapic_xlate; + id->priv = (void *)i; + add_interrupt_host(id); + return; + } + } + printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name); +} + +void __init x86_add_irq_domains(void) +{ + struct device_node *dp; + + if (!of_have_populated_dt()) + return; + + for_each_node_with_property(dp, "interrupt-controller") { + if (of_device_is_compatible(dp, "intel,ce4100-ioapic")) + ioapic_add_ofnode(dp); + } +} +#else +void __init x86_add_irq_domains(void) { } +#endif diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index df20723a6a1b..220a1c11cfde 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -320,31 +320,6 @@ void die(const char *str, struct pt_regs *regs, long err) oops_end(flags, regs, sig); } -void notrace __kprobes -die_nmi(char *str, struct pt_regs *regs, int do_panic) -{ - unsigned long flags; - - if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) - return; - - /* - * We are in trouble anyway, lets at least try - * to get a message out. - */ - flags = oops_begin(); - printk(KERN_EMERG "%s", str); - printk(" on CPU%d, ip %08lx, registers:\n", - smp_processor_id(), regs->ip); - show_registers(regs); - oops_end(flags, regs, 0); - if (do_panic || panic_on_oops) - panic("Non maskable interrupt"); - nmi_exit(); - local_irq_enable(); - do_exit(SIGBUS); -} - static int __init oops_setup(char *s) { if (!s) diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 294f26da0c0c..cdf5bfd9d4d5 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -667,21 +667,15 @@ __init void e820_setup_gap(void) * boot_params.e820_map, others are passed via SETUP_E820_EXT node of * linked list of struct setup_data, which is parsed here. */ -void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data) +void __init parse_e820_ext(struct setup_data *sdata) { - u32 map_len; int entries; struct e820entry *extmap; entries = sdata->len / sizeof(struct e820entry); - map_len = sdata->len + sizeof(struct setup_data); - if (map_len > PAGE_SIZE) - sdata = early_ioremap(pa_data, map_len); extmap = (struct e820entry *)(sdata->data); __append_e820_map(extmap, entries); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); - if (map_len > PAGE_SIZE) - early_iounmap(sdata, map_len); printk(KERN_INFO "extended physical RAM map:\n"); e820_print_map("extended"); } @@ -847,15 +841,21 @@ static int __init parse_memopt(char *p) if (!p) return -EINVAL; -#ifdef CONFIG_X86_32 if (!strcmp(p, "nopentium")) { +#ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; - } +#else + printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); + return -EINVAL; #endif + } userdef = 1; mem_size = memparse(p, &p); + /* don't remove all of memory when handling "mem={invalid}" param */ + if (mem_size == 0) + return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9efbdcc56425..3755ef494390 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) if (rev >= 0x40) acpi_fix_pin2_polarity = 1; - if (rev > 0x13) + /* + * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... + * SB700: revisions 0x39, 0x3a, ... + * SB800: revisions 0x40, 0x41, ... 
+ */ + if (rev >= 0x39) return; if (acpi_use_timer_override) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c8b4efad7ebb..5c1a91974918 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -65,6 +65,8 @@ #define sysexit_audit syscall_exit_work #endif + .section .entry.text, "ax" + /* * We use macros for low-level operations which need to be overridden * for paravirtualization. The following will never clobber any registers: @@ -395,7 +397,7 @@ sysenter_past_esp: * A tiny bit of offset fixup is necessary - 4*4 means the 4 words * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) + pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) CFI_REL_OFFSET eip, 0 pushl_cfi %eax @@ -788,7 +790,7 @@ ENDPROC(ptregs_clone) */ .section .init.rodata,"a" ENTRY(interrupt) -.text +.section .entry.text, "ax" .p2align 5 .p2align CONFIG_X86_L1_CACHE_SHIFT ENTRY(irq_entries_start) @@ -807,7 +809,7 @@ vector=FIRST_EXTERNAL_VECTOR .endif .previous .long 1b - .text + .section .entry.text, "ax" vector=vector+1 .endif .endr @@ -1409,11 +1411,10 @@ END(general_protection) #ifdef CONFIG_KVM_GUEST ENTRY(async_page_fault) RING0_EC_FRAME - pushl $do_async_page_fault - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_async_page_fault jmp error_code CFI_ENDPROC -END(apf_page_fault) +END(async_page_fault) #endif /* diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index aed1ffbeb0c9..b72b4a6466a9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -61,6 +61,8 @@ #define __AUDIT_ARCH_LE 0x40000000 .code64 + .section .entry.text, "ax" + #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) @@ -744,7 +746,7 @@ END(stub_rt_sigreturn) */ .section .init.rodata,"a" ENTRY(interrupt) - .text + .section .entry.text .p2align 5 .p2align CONFIG_X86_L1_CACHE_SHIFT ENTRY(irq_entries_start) @@ -763,7 +765,7 @@ vector=FIRST_EXTERNAL_VECTOR .endif .previous .quad 1b - .text + .section .entry.text vector=vector+1 .endif .endr @@ -975,9 +977,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_SMP -.irpc idx, "01234567" +.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +.if NUM_INVALIDATE_TLB_VECTORS > \idx apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ invalidate_interrupt\idx smp_invalidate_interrupt +.endif .endr #endif @@ -1248,7 +1253,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) decl PER_CPU_VAR(irq_count) jmp error_exit CFI_ENDPROC -END(do_hypervisor_callback) +END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. 
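The ati_bugs_contd() hunk above reduces the south-bridge quirk decision to two revision thresholds: 0x40 and up (SB800) requests the pin-2 polarity fix, 0x39 and up (SB700/SB800) skips the SB600-only timer-override handling, and everything below 0x39 is treated as SB600 and keeps it. A compact model of that gate; the struct and field names below are mine, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

struct sb_quirks {
	bool fix_pin2_polarity;		/* SB800 only (rev >= 0x40) */
	bool sb600_timer_quirk;		/* any SB600 revision (rev < 0x39) */
};

static struct sb_quirks classify_sb_revision(unsigned int rev)
{
	struct sb_quirks q = { false, false };

	if (rev >= 0x40)
		q.fix_pin2_polarity = true;
	if (rev >= 0x39)		/* SB700/SB800: no timer-override handling */
		return q;
	q.sb600_timer_quirk = true;
	return q;
}

int main(void)
{
	unsigned int revs[] = { 0x13, 0x14, 0x39, 0x41 };

	for (unsigned int i = 0; i < sizeof(revs) / sizeof(revs[0]); i++) {
		struct sb_quirks q = classify_sb_revision(revs[i]);

		printf("rev 0x%02x: sb600 quirk %d, pin2 fix %d\n",
		       revs[i], q.sb600_timer_quirk, q.fix_pin2_polarity);
	}
	return 0;
}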
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 382eb2936d4d..a93742a57468 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -437,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, return; } - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer) == -EBUSY) { - *parent = old; - return; - } - trace.func = self_addr; + trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; *parent = old; + return; + } + + if (ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer) == -EBUSY) { + *parent = old; + return; } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 7f138b3c3c52..d6d6bb361931 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -34,15 +34,6 @@ void __init i386_start_kernel(void) { memblock_init(); -#ifdef CONFIG_X86_TRAMPOLINE - /* - * But first pinch a few for the stack/trampoline stuff - * FIXME: Don't need the extra page at 4K, but need to fix - * trampoline before removing it. (see the GDT stuff) - */ - memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); -#endif - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 767d6c43de37..ce0be7cd085e 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT */ KERNEL_PAGES = LOWMEM_PAGES -INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm +INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* @@ -137,7 +137,7 @@ ENTRY(startup_32) movsl 1: -#ifdef CONFIG_OLPC_OPENFIRMWARE +#ifdef CONFIG_OLPC /* save OFW's pgdir table for later use when calling into OFW */ movl %cr3, %eax movl %eax, pa(olpc_ofw_pgd) @@ -623,7 +623,7 @@ ENTRY(initial_code) * BSS section */ __PAGE_ALIGNED_BSS - .align PAGE_SIZE_asm + .align PAGE_SIZE #ifdef CONFIG_X86_PAE initial_pg_pmd: .fill 1024*KPMDS,4,0 @@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir) #ifdef CONFIG_X86_PAE __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ - .align PAGE_SIZE_asm + .align PAGE_SIZE ENTRY(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 @@ -662,7 +662,7 @@ ENTRY(initial_page_table) # else # error "Kernel PMDs should be 1, 2 or 3" # endif - .align PAGE_SIZE_asm /* needs to be page-sized too */ + .align PAGE_SIZE /* needs to be page-sized too */ #endif .data diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 239046bd447f..e11e39478a49 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -136,10 +136,9 @@ ident_complete: /* Fixup phys_base */ addq %rbp, phys_base(%rip) -#ifdef CONFIG_X86_TRAMPOLINE + /* Fixup trampoline */ addq %rbp, trampoline_level4_pgt + 0(%rip) addq %rbp, trampoline_level4_pgt + (511*8)(%rip) -#endif /* Due to ENTRY(), sometimes the empty space gets filled with * zeros. 
Better take a jmp than relying on empty space being diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 4ff5968f12d2..bfe8f729e086 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) if (!irq) return -EINVAL; - set_irq_data(irq, dev); + irq_set_handler_data(irq, dev); if (hpet_setup_msi_irq(irq)) return -EINVAL; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 20757cb2efa3..d9ca749c123b 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); - set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, + irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, i8259A_chip.name); enable_irq(irq); } diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 8eec0ec59af2..8c968974253d 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -14,22 +14,9 @@ #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/syscalls.h> +#include <linux/bitmap.h> #include <asm/syscalls.h> -/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ -static void set_bitmap(unsigned long *bitmap, unsigned int base, - unsigned int extent, int new_value) -{ - unsigned int i; - - for (i = base; i < base + extent; i++) { - if (new_value) - __set_bit(i, bitmap); - else - __clear_bit(i, bitmap); - } -} - /* * this changes the io permissions bitmap in the current task. */ @@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) */ tss = &per_cpu(init_tss, get_cpu()); - set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); + if (turn_on) + bitmap_clear(t->io_bitmap_ptr, from, num); + else + bitmap_set(t->io_bitmap_ptr, from, num); /* * Search for a (possibly new) maximum. 
This is simple and stupid, diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 387b6a0c9e81..948a31eae75f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq) #define irq_stats(x) (&per_cpu(irq_stat, x)) /* - * /proc/interrupts printing: + * /proc/interrupts printing for arch specific interrupts */ -static int show_other_interrupts(struct seq_file *p, int prec) +int arch_show_interrupts(struct seq_file *p, int prec) { int j; @@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) return 0; } -int show_interrupts(struct seq_file *p, void *v) -{ - unsigned long flags, any_count = 0; - int i = *(loff_t *) v, j, prec; - struct irqaction *action; - struct irq_desc *desc; - - if (i > nr_irqs) - return 0; - - for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) - j *= 10; - - if (i == nr_irqs) - return show_other_interrupts(p, prec); - - /* print header */ - if (i == 0) { - seq_printf(p, "%*s", prec + 8, ""); - for_each_online_cpu(j) - seq_printf(p, "CPU%-8d", j); - seq_putc(p, '\n'); - } - - desc = irq_to_desc(i); - if (!desc) - return 0; - - raw_spin_lock_irqsave(&desc->lock, flags); - for_each_online_cpu(j) - any_count |= kstat_irqs_cpu(i, j); - action = desc->action; - if (!action && !any_count) - goto out; - - seq_printf(p, "%*d: ", prec, i); - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); - seq_printf(p, " %8s", desc->irq_data.chip->name); - seq_printf(p, "-%-8s", desc->name); - - if (action) { - seq_printf(p, " %s", action->name); - while ((action = action->next) != NULL) - seq_printf(p, ", %s", action->name); - } - - seq_putc(p, '\n'); -out: - raw_spin_unlock_irqrestore(&desc->lock, flags); - return 0; -} - /* * /proc/stat helpers */ @@ -276,15 +223,6 @@ void smp_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); -#ifdef CONFIG_OF -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - return intspec[0]; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); -#endif - #ifdef CONFIG_HOTPLUG_CPU /* A cpu has been removed from cpu_online_mask. Reset irq affinities. 
*/ void fixup_irqs(void) @@ -293,6 +231,7 @@ void fixup_irqs(void) static int warned; struct irq_desc *desc; struct irq_data *data; + struct irq_chip *chip; for_each_irq_desc(irq, desc) { int break_affinity = 0; @@ -307,10 +246,10 @@ void fixup_irqs(void) /* interrupt's are disabled at this point */ raw_spin_lock(&desc->lock); - data = &desc->irq_data; + data = irq_desc_get_irq_data(desc); affinity = data->affinity; if (!irq_has_action(irq) || - cpumask_equal(affinity, cpu_online_mask)) { + cpumask_subset(affinity, cpu_online_mask)) { raw_spin_unlock(&desc->lock); continue; } @@ -327,16 +266,17 @@ void fixup_irqs(void) affinity = cpu_all_mask; } - if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) - data->chip->irq_mask(data); + chip = irq_data_get_irq_chip(data); + if (!irqd_can_move_in_process_context(data) && chip->irq_mask) + chip->irq_mask(data); - if (data->chip->irq_set_affinity) - data->chip->irq_set_affinity(data, affinity, true); + if (chip->irq_set_affinity) + chip->irq_set_affinity(data, affinity, true); else if (!(warned++)) set_affinity = 0; - if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) - data->chip->irq_unmask(data); + if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) + chip->irq_unmask(data); raw_spin_unlock(&desc->lock); @@ -368,10 +308,11 @@ void fixup_irqs(void) irq = __this_cpu_read(vector_irq[vector]); desc = irq_to_desc(irq); - data = &desc->irq_data; + data = irq_desc_get_irq_data(desc); + chip = irq_data_get_irq_chip(data); raw_spin_lock(&desc->lock); - if (data->chip->irq_retrigger) - data->chip->irq_retrigger(data); + if (chip->irq_retrigger) + chip->irq_retrigger(data); raw_spin_unlock(&desc->lock); } } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index c752e973958d..f470e4ef993e 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -25,6 +25,7 @@ #include <asm/setup.h> #include <asm/i8259.h> #include <asm/traps.h> +#include <asm/prom.h> /* * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: @@ -71,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) static struct irqaction fpu_irq = { .handler = math_error_irq, .name = "fpu", + .flags = IRQF_NO_THREAD, }; #endif @@ -80,6 +82,7 @@ static struct irqaction fpu_irq = { static struct irqaction irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { @@ -110,7 +113,7 @@ void __init init_ISA_irqs(void) legacy_pic->init(0); for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) - set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); + irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); } void __init init_IRQ(void) @@ -118,6 +121,12 @@ void __init init_IRQ(void) int i; /* + * We probably need a better place for this, but it works for + * now ... + */ + x86_add_irq_domains(); + + /* * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15. * If these IRQ's are handled by legacy interrupt-controllers like PIC, * then this configuration will likely be static after the boot. 
If @@ -164,14 +173,77 @@ static void __init smp_intr_init(void) alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); /* IPIs for invalidation */ - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); - alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); +#define ALLOC_INVTLB_VEC(NR) \ + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \ + invalidate_interrupt##NR) + + switch (NUM_INVALIDATE_TLB_VECTORS) { + default: + ALLOC_INVTLB_VEC(31); + case 31: + ALLOC_INVTLB_VEC(30); + case 30: + ALLOC_INVTLB_VEC(29); + case 29: + ALLOC_INVTLB_VEC(28); + case 28: + ALLOC_INVTLB_VEC(27); + case 27: + ALLOC_INVTLB_VEC(26); + case 26: + ALLOC_INVTLB_VEC(25); + case 25: + ALLOC_INVTLB_VEC(24); + case 24: + ALLOC_INVTLB_VEC(23); + case 23: + ALLOC_INVTLB_VEC(22); + case 22: + ALLOC_INVTLB_VEC(21); + case 21: + ALLOC_INVTLB_VEC(20); + case 20: + ALLOC_INVTLB_VEC(19); + case 19: + ALLOC_INVTLB_VEC(18); + case 18: + ALLOC_INVTLB_VEC(17); + case 17: + ALLOC_INVTLB_VEC(16); + case 16: + ALLOC_INVTLB_VEC(15); + case 15: + ALLOC_INVTLB_VEC(14); + case 14: + ALLOC_INVTLB_VEC(13); + case 13: + ALLOC_INVTLB_VEC(12); + case 12: + ALLOC_INVTLB_VEC(11); + case 11: + ALLOC_INVTLB_VEC(10); + case 10: + ALLOC_INVTLB_VEC(9); + case 9: + ALLOC_INVTLB_VEC(8); + case 8: + ALLOC_INVTLB_VEC(7); + case 7: + ALLOC_INVTLB_VEC(6); + case 6: + ALLOC_INVTLB_VEC(5); + case 5: + ALLOC_INVTLB_VEC(4); + case 4: + ALLOC_INVTLB_VEC(3); + case 3: + ALLOC_INVTLB_VEC(2); + case 2: + ALLOC_INVTLB_VEC(1); + case 1: + ALLOC_INVTLB_VEC(0); + break; + } /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); @@ -243,7 +315,7 @@ void __init native_init_IRQ(void) set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); } - if (!acpi_ioapic) + if (!acpi_ioapic && !of_ioapic) setup_irq(2, &irq2); #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index a4130005028a..7c64c420a9f6 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) } return NOTIFY_DONE; - case DIE_NMIWATCHDOG: - if (atomic_read(&kgdb_active) != -1) { - /* KGDB CPU roundup: */ - kgdb_nmicallback(raw_smp_processor_id(), regs); - return NOTIFY_STOP; - } - /* Enter debugger: */ - break; - case DIE_DEBUG: if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index d91c477b3f62..c969fd9d1566 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -1276,6 +1276,14 @@ static int __kprobes can_optimize(unsigned long paddr) if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) return 0; + /* + * Do not optimize in the entry code due to the unstable + * stack handling. + */ + if ((paddr >= (unsigned long )__entry_text_start) && + (paddr < (unsigned long )__entry_text_end)) + return 0; + /* Check there is enough space for a relative jump. 
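The smp_intr_init() hunk above swaps eight fixed alloc_intr_gate() calls for a switch whose cases intentionally fall through, so exactly NUM_INVALIDATE_TLB_VECTORS gates are installed for any configured value up to 32. The same fall-through ladder in a self-contained form, scaled down to eight entries and with a recorded number standing in for the interrupt gate:

#include <stdio.h>

#define NUM_VECTORS 5		/* stands in for NUM_INVALIDATE_TLB_VECTORS */

static int installed[8];
static int ninstalled;

static void alloc_gate(int nr)
{
	installed[ninstalled++] = nr;
}

/* Cases fall through on purpose: entering at N installs gates N-1 .. 0. */
static void alloc_tlb_vectors(void)
{
	switch (NUM_VECTORS) {
	default:
	case 8: alloc_gate(7);
	case 7: alloc_gate(6);
	case 6: alloc_gate(5);
	case 5: alloc_gate(4);
	case 4: alloc_gate(3);
	case 3: alloc_gate(2);
	case 2: alloc_gate(1);
	case 1: alloc_gate(0);
		break;
	}
}

int main(void)
{
	alloc_tlb_vectors();
	for (int i = 0; i < ninstalled; i++)
		printf("gate %d\n", installed[i]);	/* prints 4, 3, 2, 1, 0 */
	return 0;
}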
*/ if (size - offset < RELATIVEJUMP_SIZE) return 0; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 0fe6d1a66c38..c5610384ab16 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -66,7 +66,6 @@ struct microcode_amd { unsigned int mpb[0]; }; -#define UCODE_MAX_SIZE 2048 #define UCODE_CONTAINER_SECTION_HDR 8 #define UCODE_CONTAINER_HEADER_SIZE 12 @@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) struct cpuinfo_x86 *c = &cpu_data(cpu); u32 dummy; - memset(csig, 0, sizeof(*csig)); if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { - pr_warning("microcode: CPU%d: AMD CPU family 0x%x not " - "supported\n", cpu, c->x86); + pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); return -1; } + rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); - pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); + pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); + return 0; } -static int get_matching_microcode(int cpu, void *mc, int rev) +static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr, + int rev) { - struct microcode_header_amd *mc_header = mc; unsigned int current_cpu_id; u16 equiv_cpu_id = 0; unsigned int i = 0; @@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev) if (!equiv_cpu_id) return 0; - if (mc_header->processor_rev_id != equiv_cpu_id) + if (mc_hdr->processor_rev_id != equiv_cpu_id) return 0; /* ucode might be chipset specific -- currently we don't support this */ - if (mc_header->nb_dev_id || mc_header->sb_dev_id) { - pr_err("CPU%d: loading of chipset specific code not yet supported\n", + if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { + pr_err("CPU%d: chipset specific code not yet supported\n", cpu); return 0; } - if (mc_header->patch_id <= rev) + if (mc_hdr->patch_id <= rev) return 0; return 1; @@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu) /* check current patch id and patch's id for match */ if (rev != mc_amd->hdr.patch_id) { - pr_err("CPU%d: update failed (for patch_level=0x%x)\n", + pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return -1; } - pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); + pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); uci->cpu_sig.rev = rev; return 0; } -static void * -get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) +static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) { - unsigned int total_size; - u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; - void *mc; + struct cpuinfo_x86 *c = &cpu_data(cpu); + unsigned int max_size, actual_size; + +#define F1XH_MPB_MAX_SIZE 2048 +#define F14H_MPB_MAX_SIZE 1824 +#define F15H_MPB_MAX_SIZE 4096 + + switch (c->x86) { + case 0x14: + max_size = F14H_MPB_MAX_SIZE; + break; + case 0x15: + max_size = F15H_MPB_MAX_SIZE; + break; + default: + max_size = F1XH_MPB_MAX_SIZE; + break; + } - get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR); + actual_size = buf[4] + (buf[5] << 8); - if (section_hdr[0] != UCODE_UCODE_TYPE) { - pr_err("error: invalid type field in container file section header\n"); - return NULL; + if (actual_size > size || actual_size > max_size) { + pr_err("section size mismatch\n"); + return 0; } - total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); + return actual_size; +} - if (total_size > size || total_size > UCODE_MAX_SIZE) { - pr_err("error: size mismatch\n"); - return NULL; +static struct 
microcode_header_amd * +get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) +{ + struct microcode_header_amd *mc = NULL; + unsigned int actual_size = 0; + + if (buf[0] != UCODE_UCODE_TYPE) { + pr_err("invalid type field in container file section header\n"); + goto out; } - mc = vzalloc(UCODE_MAX_SIZE); + actual_size = verify_ucode_size(cpu, buf, size); + if (!actual_size) + goto out; + + mc = vzalloc(actual_size); if (!mc) - return NULL; + goto out; - get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size); - *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR; + get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size); + *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR; +out: return mc; } static int install_equiv_cpu_table(const u8 *buf) { - u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; - unsigned int *buf_pos = (unsigned int *)container_hdr; - unsigned long size; - - get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE); - - size = buf_pos[2]; - - if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { - pr_err("error: invalid type field in container file section header\n"); - return 0; + unsigned int *ibuf = (unsigned int *)buf; + unsigned int type = ibuf[1]; + unsigned int size = ibuf[2]; + + if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { + pr_err("empty section/" + "invalid type field in container file section header\n"); + return -EINVAL; } equiv_cpu_table = vmalloc(size); if (!equiv_cpu_table) { pr_err("failed to allocate equivalent CPU table\n"); - return 0; + return -ENOMEM; } - buf += UCODE_CONTAINER_HEADER_SIZE; - get_ucode_data(equiv_cpu_table, buf, size); + get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size); return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ } @@ -223,16 +244,16 @@ static enum ucode_state generic_load_microcode(int cpu, const u8 *data, size_t size) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct microcode_header_amd *mc_hdr = NULL; + unsigned int mc_size, leftover; + int offset; const u8 *ucode_ptr = data; void *new_mc = NULL; - void *mc; - int new_rev = uci->cpu_sig.rev; - unsigned int leftover; - unsigned long offset; + unsigned int new_rev = uci->cpu_sig.rev; enum ucode_state state = UCODE_OK; offset = install_equiv_cpu_table(ucode_ptr); - if (!offset) { + if (offset < 0) { pr_err("failed to create equivalent cpu table\n"); return UCODE_ERROR; } @@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) leftover = size - offset; while (leftover) { - unsigned int uninitialized_var(mc_size); - struct microcode_header_amd *mc_header; - - mc = get_next_ucode(ucode_ptr, leftover, &mc_size); - if (!mc) + mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); + if (!mc_hdr) break; - mc_header = (struct microcode_header_amd *)mc; - if (get_matching_microcode(cpu, mc, new_rev)) { + if (get_matching_microcode(cpu, mc_hdr, new_rev)) { vfree(new_mc); - new_rev = mc_header->patch_id; - new_mc = mc; + new_rev = mc_hdr->patch_id; + new_mc = mc_hdr; } else - vfree(mc); + vfree(mc_hdr); ucode_ptr += mc_size; leftover -= mc_size; } - if (new_mc) { - if (!leftover) { - vfree(uci->mc); - uci->mc = new_mc; - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); - } else { - vfree(new_mc); - state = UCODE_ERROR; - } - } else + if (!new_mc) { state = UCODE_NFOUND; + goto free_table; + } + if (!leftover) { + vfree(uci->mc); + uci->mc = new_mc; + pr_debug("CPU%d 
update ucode (0x%08x -> 0x%08x)\n", + cpu, uci->cpu_sig.rev, new_rev); + } else { + vfree(new_mc); + state = UCODE_ERROR; + } + +free_table: free_equiv_cpu_table(); return state; } -static enum ucode_state request_microcode_fw(int cpu, struct device *device) +static enum ucode_state request_microcode_amd(int cpu, struct device *device) { const char *fw_name = "amd-ucode/microcode_amd.bin"; - const struct firmware *firmware; - enum ucode_state ret; + const struct firmware *fw; + enum ucode_state ret = UCODE_NFOUND; - if (request_firmware(&firmware, fw_name, device)) { - printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); - return UCODE_NFOUND; + if (request_firmware(&fw, fw_name, device)) { + pr_err("failed to load file %s\n", fw_name); + goto out; } - if (*(u32 *)firmware->data != UCODE_MAGIC) { - pr_err("invalid UCODE_MAGIC (0x%08x)\n", - *(u32 *)firmware->data); - return UCODE_ERROR; + ret = UCODE_ERROR; + if (*(u32 *)fw->data != UCODE_MAGIC) { + pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); + goto fw_release; } - ret = generic_load_microcode(cpu, firmware->data, firmware->size); + ret = generic_load_microcode(cpu, fw->data, fw->size); - release_firmware(firmware); +fw_release: + release_firmware(fw); +out: return ret; } @@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu) static struct microcode_ops microcode_amd_ops = { .request_microcode_user = request_microcode_user, - .request_microcode_fw = request_microcode_fw, + .request_microcode_fw = request_microcode_amd, .collect_cpu_info = collect_cpu_info_amd, .apply_microcode = apply_microcode_amd, .microcode_fini_cpu = microcode_fini_cpu_amd, diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 1cca374a2bac..87af68e0e1e1 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev) if (err) return err; - if (microcode_init_cpu(cpu) == UCODE_ERROR) - err = -EINVAL; + if (microcode_init_cpu(cpu) == UCODE_ERROR) { + sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); + return -EINVAL; + } return err; } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ff4554198981..99fa3adf0141 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -110,12 +110,9 @@ void show_regs_common(void) init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); - printk(KERN_CONT " "); - printk(KERN_CONT "%s %s", vendor, product); - if (board) { - printk(KERN_CONT "/"); - printk(KERN_CONT "%s", board); - } + printk(KERN_CONT " %s %s", vendor, product); + if (board) + printk(KERN_CONT "/%s", board); printk(KERN_CONT "\n"); } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 715037caeb43..d3ce37edb54d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -303,68 +303,16 @@ static int __init reboot_init(void) } core_initcall(reboot_init); -/* The following code and data reboots the machine by switching to real - mode and jumping to the BIOS reset entry point, as if the CPU has - really been reset. The previous version asked the keyboard - controller to pulse the CPU reset line, which is more thorough, but - doesn't work with at least one type of 486 motherboard. It is easy - to stop this code working; hence the copious comments. 
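generic_load_microcode() above now defers size checking to verify_ucode_size(), which reads the section payload length as a little-endian 16-bit value at offset 4 of the section header and bounds it both by the bytes left in the container and by a per-family cap (2048 generic, 1824 for family 0x14, 4096 for family 0x15). A standalone version of that check, exercised with a fabricated header:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Per-family patch-size caps quoted in the hunk. */
static unsigned int max_mpb_size(unsigned int family)
{
	switch (family) {
	case 0x14: return 1824;
	case 0x15: return 4096;
	default:   return 2048;
	}
}

/* buf points at a container section header; bytes 4-5 hold the payload size. */
static unsigned int verify_ucode_size(unsigned int family,
				      const uint8_t *buf, size_t left)
{
	unsigned int actual = buf[4] + (buf[5] << 8);

	if (actual > left || actual > max_mpb_size(family)) {
		fprintf(stderr, "section size mismatch\n");
		return 0;
	}
	return actual;
}

int main(void)
{
	uint8_t hdr[8] = { 0, 0, 0, 0, 0x00, 0x08, 0, 0 };	/* claims 2048 bytes */

	printf("%u\n", verify_ucode_size(0x15, hdr, 4096));	/* 2048: within cap */
	printf("%u\n", verify_ucode_size(0x14, hdr, 4096));	/* 0: over 1824 cap */
	return 0;
}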
*/ -static const unsigned long long -real_mode_gdt_entries [3] = -{ - 0x0000000000000000ULL, /* Null descriptor */ - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ -}; +extern const unsigned char machine_real_restart_asm[]; +extern const u64 machine_real_restart_gdt[3]; -static const struct desc_ptr -real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, -real_mode_idt = { 0x3ff, 0 }; - -/* This is 16-bit protected mode code to disable paging and the cache, - switch to real mode and jump to the BIOS reset code. - - The instruction that switches to real mode by writing to CR0 must be - followed immediately by a far jump instruction, which set CS to a - valid value for real mode, and flushes the prefetch queue to avoid - running instructions that have already been decoded in protected - mode. - - Clears all the flags except ET, especially PG (paging), PE - (protected-mode enable) and TS (task switch for coprocessor state - save). Flushes the TLB after paging has been disabled. Sets CD and - NW, to disable the cache on a 486, and invalidates the cache. This - is more like the state of a 486 after reset. I don't know if - something else should be done for other chips. - - More could be done here to set up the registers as if a CPU reset had - occurred; hopefully real BIOSs don't assume much. */ -static const unsigned char real_mode_switch [] = -{ - 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ - 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ - 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */ - 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */ - 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */ - 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ - 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ - 0x74, 0x02, /* jz f */ - 0x0f, 0x09, /* wbinvd */ - 0x24, 0x10, /* f: andb $0x10,al */ - 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ -}; -static const unsigned char jump_to_bios [] = +void machine_real_restart(unsigned int type) { - 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */ -}; + void *restart_va; + unsigned long restart_pa; + void (*restart_lowmem)(unsigned int); + u64 *lowmem_gdt; -/* - * Switch to real mode and then execute the code - * specified by the code and length parameters. - * We assume that length will aways be less that 100! - */ -void machine_real_restart(const unsigned char *code, int length) -{ local_irq_disable(); /* Write zero to CMOS register number 0x0f, which the BIOS POST @@ -392,41 +340,23 @@ void machine_real_restart(const unsigned char *code, int length) too. */ *((unsigned short *)0x472) = reboot_mode; - /* For the switch to real mode, copy some code to low memory. It has - to be in the first 64k because it is running in 16-bit mode, and it - has to have the same physical and virtual address, because it turns - off paging. Copy it near the end of the first page, out of the way - of BIOS variables. */ - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100), - real_mode_switch, sizeof (real_mode_switch)); - memcpy((void *)(0x1000 - 100), code, length); - - /* Set up the IDT for real mode. */ - load_idt(&real_mode_idt); - - /* Set up a GDT from which we can load segment descriptors for real - mode. The GDT is not used in real mode; it is just needed here to - prepare the descriptors. */ - load_gdt(&real_mode_gdt); - - /* Load the data segment registers, and thus the descriptors ready for - real mode. 
The base address of each segment is 0x100, 16 times the - selector value being loaded here. This is so that the segment - registers don't have to be reloaded after switching to real mode: - the values are consistent for real mode operation already. */ - __asm__ __volatile__ ("movl $0x0010,%%eax\n" - "\tmovl %%eax,%%ds\n" - "\tmovl %%eax,%%es\n" - "\tmovl %%eax,%%fs\n" - "\tmovl %%eax,%%gs\n" - "\tmovl %%eax,%%ss" : : : "eax"); - - /* Jump to the 16-bit code that we copied earlier. It disables paging - and the cache, switches to real mode, and jumps to the BIOS reset - entry point. */ - __asm__ __volatile__ ("ljmp $0x0008,%0" - : - : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100))); + /* Patch the GDT in the low memory trampoline */ + lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); + + restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); + restart_pa = virt_to_phys(restart_va); + restart_lowmem = (void (*)(unsigned int))restart_pa; + + /* GDT[0]: GDT self-pointer */ + lowmem_gdt[0] = + (u64)(sizeof(machine_real_restart_gdt) - 1) + + ((u64)virt_to_phys(lowmem_gdt) << 16); + /* GDT[1]: 64K real mode code segment */ + lowmem_gdt[1] = + GDT_ENTRY(0x009b, restart_pa, 0xffff); + + /* Jump to the identity-mapped low memory code */ + restart_lowmem(type); } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(machine_real_restart); @@ -581,7 +511,7 @@ static void native_machine_emergency_restart(void) #ifdef CONFIG_X86_32 case BOOT_BIOS: - machine_real_restart(jump_to_bios, sizeof(jump_to_bios)); + machine_real_restart(MRR_BIOS); reboot_type = BOOT_KBD; break; diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/kernel/reboot_32.S new file mode 100644 index 000000000000..29092b38d816 --- /dev/null +++ b/arch/x86/kernel/reboot_32.S @@ -0,0 +1,135 @@ +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/segment.h> +#include <asm/page_types.h> + +/* + * The following code and data reboots the machine by switching to real + * mode and jumping to the BIOS reset entry point, as if the CPU has + * really been reset. The previous version asked the keyboard + * controller to pulse the CPU reset line, which is more thorough, but + * doesn't work with at least one type of 486 motherboard. It is easy + * to stop this code working; hence the copious comments. + * + * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax. + */ + .section ".x86_trampoline","a" + .balign 16 + .code32 +ENTRY(machine_real_restart_asm) +r_base = . + /* Get our own relocated address */ + call 1f +1: popl %ebx + subl $1b, %ebx + + /* Compute the equivalent real-mode segment */ + movl %ebx, %ecx + shrl $4, %ecx + + /* Patch post-real-mode segment jump */ + movw dispatch_table(%ebx,%eax,2),%ax + movw %ax, 101f(%ebx) + movw %cx, 102f(%ebx) + + /* Set up the IDT for real mode. */ + lidtl machine_real_restart_idt(%ebx) + + /* + * Set up a GDT from which we can load segment descriptors for real + * mode. The GDT is not used in real mode; it is just needed here to + * prepare the descriptors. + */ + lgdtl machine_real_restart_gdt(%ebx) + + /* + * Load the data segment registers with 16-bit compatible values + */ + movl $16, %ecx + movl %ecx, %ds + movl %ecx, %es + movl %ecx, %fs + movl %ecx, %gs + movl %ecx, %ss + ljmpl $8, $1f - r_base + +/* + * This is 16-bit protected mode code to disable paging and the cache, + * switch to real mode and jump to the BIOS reset code. 
+ * + * The instruction that switches to real mode by writing to CR0 must be + * followed immediately by a far jump instruction, which set CS to a + * valid value for real mode, and flushes the prefetch queue to avoid + * running instructions that have already been decoded in protected + * mode. + * + * Clears all the flags except ET, especially PG (paging), PE + * (protected-mode enable) and TS (task switch for coprocessor state + * save). Flushes the TLB after paging has been disabled. Sets CD and + * NW, to disable the cache on a 486, and invalidates the cache. This + * is more like the state of a 486 after reset. I don't know if + * something else should be done for other chips. + * + * More could be done here to set up the registers as if a CPU reset had + * occurred; hopefully real BIOSs don't assume much. This is not the + * actual BIOS entry point, anyway (that is at 0xfffffff0). + * + * Most of this work is probably excessive, but it is what is tested. + */ + .code16 +1: + xorl %ecx, %ecx + movl %cr0, %eax + andl $0x00000011, %eax + orl $0x60000000, %eax + movl %eax, %cr0 + movl %ecx, %cr3 + movl %cr0, %edx + andl $0x60000000, %edx /* If no cache bits -> no wbinvd */ + jz 2f + wbinvd +2: + andb $0x10, %al + movl %eax, %cr0 + .byte 0xea /* ljmpw */ +101: .word 0 /* Offset */ +102: .word 0 /* Segment */ + +bios: + ljmpw $0xf000, $0xfff0 + +apm: + movw $0x1000, %ax + movw %ax, %ss + movw $0xf000, %sp + movw $0x5307, %ax + movw $0x0001, %bx + movw $0x0003, %cx + int $0x15 + +END(machine_real_restart_asm) + + .balign 16 + /* These must match <asm/reboot.h */ +dispatch_table: + .word bios - r_base + .word apm - r_base +END(dispatch_table) + + .balign 16 +machine_real_restart_idt: + .word 0xffff /* Length - real mode default value */ + .long 0 /* Base - real mode default value */ +END(machine_real_restart_idt) + + .balign 16 +ENTRY(machine_real_restart_gdt) + .quad 0 /* Self-pointer, filled in by PM code */ + .quad 0 /* 16-bit code segment, filled in by PM code */ + /* + * 16-bit data segment with the selector value 16 = 0x10 and + * base value 0x100; since this is consistent with real mode + * semantics we don't have to reload the segments once CR0.PE = 0. + */ + .quad GDT_ENTRY(0x0093, 0x100, 0xffff) +END(machine_real_restart_gdt) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 6f39cab052d5..3f2ad2640d85 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -6,6 +6,7 @@ #include <linux/acpi.h> #include <linux/bcd.h> #include <linux/pnp.h> +#include <linux/of.h> #include <asm/vsyscall.h> #include <asm/x86_init.h> @@ -236,6 +237,8 @@ static __init int add_rtc_cmos(void) } } #endif + if (of_have_populated_dt()) + return 0; platform_device_register(&rtc_device); dev_info(&rtc_device.dev, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3cfe26c0252..9d43b28e0728 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -113,6 +113,7 @@ #endif #include <asm/mce.h> #include <asm/alternative.h> +#include <asm/prom.h> /* * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. @@ -293,10 +294,32 @@ static void __init init_gbpages(void) else direct_gbpages = 0; } + +static void __init cleanup_highmap_brk_end(void) +{ + pud_t *pud; + pmd_t *pmd; + + mmu_cr4_features = read_cr4(); + + /* + * _brk_end cannot change anymore, but it and _end may be + * located on different 2M pages. cleanup_highmap(), however, + * can only consider _end when it runs, so destroy any + * mappings beyond _brk_end here. 
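/*
 * Illustration only: the magic constants in the 16-bit stub above are
 * standard CR0 bits.  "andl $0x00000011" keeps only PE and ET (clearing PG
 * among others), "orl $0x60000000" sets CD and NW to disable the cache,
 * and the final "andb $0x10, %al" drops PE from the low byte while the
 * freshly set CD|NW in the upper bits stay put, so the value written back
 * is ET|CD|NW: paging and protection off, cache still disabled.
 */
#include <assert.h>
#include <stdint.h>

#define CR0_PE 0x00000001u	/* protected mode enable */
#define CR0_ET 0x00000010u	/* extension type */
#define CR0_NW 0x20000000u	/* not write-through */
#define CR0_CD 0x40000000u	/* cache disable */
#define CR0_PG 0x80000000u	/* paging enable */

int main(void)
{
	uint32_t cr0 = CR0_PG | CR0_PE | CR0_ET;	/* a plausible 32-bit kernel value */

	cr0 &= 0x00000011u;			/* andl: keep PE|ET, paging now off  */
	assert(cr0 == (CR0_PE | CR0_ET));

	cr0 |= 0x60000000u;			/* orl: set CD|NW, 486-style         */
	assert(cr0 == (CR0_PE | CR0_ET | CR0_CD | CR0_NW));

	cr0 = (cr0 & ~0xffu) | (cr0 & 0x10u);	/* andb $0x10,%al: clear PE only     */
	assert(cr0 == (CR0_ET | CR0_CD | CR0_NW));
	return 0;
}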
+ */ + pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); + pmd = pmd_offset(pud, _brk_end - 1); + while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) + pmd_clear(pmd); +} #else static inline void init_gbpages(void) { } +static inline void cleanup_highmap_brk_end(void) +{ +} #endif static void __init reserve_brk(void) @@ -307,6 +330,8 @@ static void __init reserve_brk(void) /* Mark brk area as locked down and no longer taking any new allocations */ _brk_start = 0; + + cleanup_highmap_brk_end(); } #ifdef CONFIG_BLK_DEV_INITRD @@ -429,16 +454,30 @@ static void __init parse_setup_data(void) return; pa_data = boot_params.hdr.setup_data; while (pa_data) { - data = early_memremap(pa_data, PAGE_SIZE); + u32 data_len, map_len; + + map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), + (u64)sizeof(struct setup_data)); + data = early_memremap(pa_data, map_len); + data_len = data->len + sizeof(struct setup_data); + if (data_len > map_len) { + early_iounmap(data, map_len); + data = early_memremap(pa_data, data_len); + map_len = data_len; + } + switch (data->type) { case SETUP_E820_EXT: - parse_e820_ext(data, pa_data); + parse_e820_ext(data); + break; + case SETUP_DTB: + add_dtb(pa_data); break; default: break; } pa_data = data->next; - early_iounmap(data, PAGE_SIZE); + early_iounmap(data, map_len); } } @@ -680,15 +719,6 @@ static int __init parse_reservelow(char *p) early_param("reservelow", parse_reservelow); -static u64 __init get_max_mapped(void) -{ - u64 end = max_pfn_mapped; - - end <<= PAGE_SHIFT; - - return end; -} - /* * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -704,8 +734,6 @@ static u64 __init get_max_mapped(void) void __init setup_arch(char **cmdline_p) { - int acpi = 0; - int amd = 0; unsigned long flags; #ifdef CONFIG_X86_32 @@ -935,15 +963,8 @@ void __init setup_arch(char **cmdline_p) printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); - reserve_trampoline_memory(); + setup_trampolines(); -#ifdef CONFIG_ACPI_SLEEP - /* - * Reserve low memory region for sleep support. - * even before init_memory_mapping - */ - acpi_reserve_wakeup_memory(); -#endif init_gbpages(); /* max_pfn_mapped is updated here */ @@ -984,19 +1005,7 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); -#ifdef CONFIG_ACPI_NUMA - /* - * Parse SRAT to discover nodes. - */ - acpi = acpi_numa_init(); -#endif - -#ifdef CONFIG_AMD_NUMA - if (!acpi) - amd = !amd_numa_init(0, max_pfn); -#endif - - initmem_init(0, max_pfn, acpi, amd); + initmem_init(); memblock_find_dma_reserve(); dma32_reserve_bootmem(); @@ -1029,8 +1038,8 @@ void __init setup_arch(char **cmdline_p) * Read APIC and some other early information from ACPI tables. 
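/*
 * Illustration only: the reworked parse_setup_data() above first maps a
 * window that is guaranteed to cover at least the fixed setup_data header,
 * reads the recorded payload length, and only then remaps the full record
 * if it extends past that first window.  This standalone sketch redoes just
 * the length arithmetic with a hypothetical header layout; it does not call
 * any real early_memremap()-style API.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define PAGE_MASK	(~(uint64_t)(PAGE_SIZE - 1))

struct setup_data_hdr {		/* hypothetical stand-in for struct setup_data */
	uint64_t next;
	uint32_t type;
	uint32_t len;		/* payload bytes following the header */
};

static uint64_t first_map_len(uint64_t pa_data)
{
	uint64_t to_page_end = PAGE_SIZE - (pa_data & ~PAGE_MASK);

	/* never map less than one full header */
	return to_page_end > sizeof(struct setup_data_hdr) ?
	       to_page_end : sizeof(struct setup_data_hdr);
}

int main(void)
{
	uint64_t pa_data = 0x0009f000 + 0x0f90;	/* hypothetical: 0x70 bytes to page end */
	uint64_t map_len = first_map_len(pa_data);
	uint32_t payload = 0x200;		/* pretend data->len reported 512 bytes */

	assert(map_len == 0x70);
	if (sizeof(struct setup_data_hdr) + payload > map_len)
		map_len = sizeof(struct setup_data_hdr) + payload;	/* remap larger */
	assert(map_len == sizeof(struct setup_data_hdr) + 0x200);
	return 0;
}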
*/ acpi_boot_init(); - sfi_init(); + x86_dtb_init(); /* * get boot-time SMP configuration: @@ -1040,9 +1049,7 @@ void __init setup_arch(char **cmdline_p) prefill_possible_map(); -#ifdef CONFIG_X86_64 init_cpu_to_node(); -#endif init_apic_mappings(); ioapic_and_gsi_init(); @@ -1066,6 +1073,8 @@ void __init setup_arch(char **cmdline_p) #endif x86_init.oem.banner(); + x86_init.timers.wallclock_init(); + mcheck_init(); local_irq_save(flags); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 002b79685f73..71f4727da373 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void) per_cpu(x86_bios_cpu_apicid, cpu) = early_per_cpu_map(x86_bios_cpu_apicid, cpu); #endif +#ifdef CONFIG_X86_32 + per_cpu(x86_cpu_to_logical_apicid, cpu) = + early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); +#endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; +#endif #ifdef CONFIG_NUMA per_cpu(x86_cpu_to_node_map, cpu) = early_per_cpu_map(x86_cpu_to_node_map, cpu); @@ -242,7 +247,6 @@ void __init setup_per_cpu_areas(void) */ set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif -#endif /* * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. @@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void) early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; #endif -#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) +#ifdef CONFIG_X86_32 + early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; +#endif +#ifdef CONFIG_NUMA early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 08776a953487..c2871d3c71b6 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -64,6 +64,7 @@ #include <asm/mtrr.h> #include <asm/mwait.h> #include <asm/apic.h> +#include <asm/io_apic.h> #include <asm/setup.h> #include <asm/uv/uv.h> #include <linux/mc146818rtc.h> @@ -71,10 +72,6 @@ #include <asm/smpboot_hooks.h> #include <asm/i8259.h> -#ifdef CONFIG_X86_32 -u8 apicid_2_node[MAX_APICID]; -#endif - /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; @@ -130,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); + /* Per CPU bogomips and other parameters */ DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); atomic_t init_deasserted; -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) -/* which node each logical CPU is on */ -int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; -EXPORT_SYMBOL(cpu_to_node_map); - -/* set up a mapping between cpu and node. */ -static void map_cpu_to_node(int cpu, int node) -{ - printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = node; -} - -/* undo a mapping between cpu and node. 
*/ -static void unmap_cpu_to_node(int cpu) -{ - int node; - - printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); - for (node = 0; node < MAX_NUMNODES; node++) - cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = 0; -} -#else /* !(CONFIG_NUMA && CONFIG_X86_32) */ -#define map_cpu_to_node(cpu, node) ({}) -#define unmap_cpu_to_node(cpu) ({}) -#endif - -#ifdef CONFIG_X86_32 -static int boot_cpu_logical_apicid; - -u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = - { [0 ... NR_CPUS-1] = BAD_APICID }; - -static void map_cpu_to_logical_apicid(void) -{ - int cpu = smp_processor_id(); - int apicid = logical_smp_processor_id(); - int node = apic->apicid_to_node(apicid); - - if (!node_online(node)) - node = first_online_node; - - cpu_2_logical_apicid[cpu] = apicid; - map_cpu_to_node(cpu, node); -} - -void numa_remove_cpu(int cpu) -{ - cpu_2_logical_apicid[cpu] = BAD_APICID; - unmap_cpu_to_node(cpu); -} -#else -#define map_cpu_to_logical_apicid() do {} while (0) -#endif - /* * Report back to the Boot Processor. * Running on AP. @@ -259,7 +202,6 @@ static void __cpuinit smp_callin(void) apic->smp_callin_clear_local_apic(); setup_local_APIC(); end_local_APIC_setup(); - map_cpu_to_logical_apicid(); /* * Need to setup vector mappings before we enable interrupts. @@ -355,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused) cpu_idle(); } -#ifdef CONFIG_CPUMASK_OFFSTACK -/* In this case, llc_shared_map is a pointer to a cpumask. */ -static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, - const struct cpuinfo_x86 *src) -{ - struct cpumask *llc = dst->llc_shared_map; - *dst = *src; - dst->llc_shared_map = llc; -} -#else -static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, - const struct cpuinfo_x86 *src) -{ - *dst = *src; -} -#endif /* CONFIG_CPUMASK_OFFSTACK */ - /* * The bootstrap kernel entry code has set these up. 
Save them for * a given CPU @@ -381,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id) { struct cpuinfo_x86 *c = &cpu_data(id); - copy_cpuinfo_x86(c, &boot_cpu_data); + *c = boot_cpu_data; c->cpu_index = id; if (id != 0) identify_secondary_cpu(c); @@ -389,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id) static void __cpuinit link_thread_siblings(int cpu1, int cpu2) { - struct cpuinfo_x86 *c1 = &cpu_data(cpu1); - struct cpuinfo_x86 *c2 = &cpu_data(cpu2); - cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); - cpumask_set_cpu(cpu1, c2->llc_shared_map); - cpumask_set_cpu(cpu2, c1->llc_shared_map); + cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); + cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); } @@ -414,6 +336,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) if (cpu_has(c, X86_FEATURE_TOPOEXT)) { if (c->phys_proc_id == o->phys_proc_id && + per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) && c->compute_unit_id == o->compute_unit_id) link_thread_siblings(cpu, i); } else if (c->phys_proc_id == o->phys_proc_id && @@ -425,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); } - cpumask_set_cpu(cpu, c->llc_shared_map); + cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); @@ -436,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) for_each_cpu(i, cpu_sibling_setup_mask) { if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { - cpumask_set_cpu(i, c->llc_shared_map); - cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map); + cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); + cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); } if (c->phys_proc_id == cpu_data(i).phys_proc_id) { cpumask_set_cpu(i, cpu_core_mask(cpu)); @@ -476,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) !(cpu_has(c, X86_FEATURE_AMD_DCM))) return cpu_core_mask(cpu); else - return c->llc_shared_map; + return cpu_llc_shared_mask(cpu); } static void impress_friends(void) @@ -788,7 +711,7 @@ do_rest: stack_start = c_idle.idle->thread.sp; /* start_ip had better be page-aligned! */ - start_ip = setup_trampoline(); + start_ip = trampoline_address(); /* So we see what's up */ announce_cpu(cpu, apicid); @@ -798,6 +721,8 @@ do_rest: * the targeted processor. */ + printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip); + atomic_set(&init_deasserted, 0); if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { @@ -851,8 +776,8 @@ do_rest: pr_debug("CPU%d: has booted.\n", cpu); else { boot_error = 1; - if (*((volatile unsigned char *)trampoline_base) - == 0xA5) + if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) + == 0xA5A5A5A5) /* trampoline started but...? */ pr_err("CPU%d: Stuck ??\n", cpu); else @@ -878,7 +803,7 @@ do_rest: } /* mark "stuck" area as not stuck */ - *((volatile unsigned long *)trampoline_base) = 0; + *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0; if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { /* @@ -945,6 +870,14 @@ int __cpuinit native_cpu_up(unsigned int cpu) return 0; } +/** + * arch_disable_smp_support() - disables SMP support for x86 at runtime + */ +void arch_disable_smp_support(void) +{ + disable_ioapic_support(); +} + /* * Fall back to non SMP mode after errors. 
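/*
 * Illustration only: the hunks above replace the old "write 0xA5 over the
 * start of the trampoline" handshake with a dedicated trampoline_status
 * word.  The AP writes 0xA5A5A5A5 there as soon as its 16-bit stub runs;
 * if the boot CPU times out, that word lets it tell "never started" apart
 * from "started but got stuck", and it clears the word afterwards.  This
 * is a single-threaded sketch of just that decision logic, not real SMP
 * bring-up code.
 */
#include <stdint.h>
#include <stdio.h>

#define TRAMPOLINE_MAGIC 0xA5A5A5A5u

static volatile uint32_t trampoline_status;	/* lives in the low-memory trampoline */

static void ap_stub_ran(void)
{
	trampoline_status = TRAMPOLINE_MAGIC;	/* marker so the master knows we're running */
}

static void bsp_report_timeout(int cpu)
{
	if (trampoline_status == TRAMPOLINE_MAGIC)
		printf("CPU%d: stuck after starting the trampoline\n", cpu);
	else
		printf("CPU%d: never started (no response from the trampoline)\n", cpu);
	trampoline_status = 0;			/* mark the "stuck" area as not stuck */
}

int main(void)
{
	bsp_report_timeout(1);	/* nothing ran: reported as "never started" */
	ap_stub_ran();
	bsp_report_timeout(2);	/* marker seen but no call-in: reported as "stuck" */
	return 0;
}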
* @@ -960,7 +893,6 @@ static __init void disable_smp(void) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); else physid_set_mask_of_physid(0, &phys_cpu_present_map); - map_cpu_to_logical_apicid(); cpumask_set_cpu(0, cpu_sibling_mask(0)); cpumask_set_cpu(0, cpu_core_mask(0)); } @@ -1045,7 +977,7 @@ static int __init smp_sanity_check(unsigned max_cpus) "(tell your hw vendor)\n"); } smpboot_clear_io_apic(); - arch_disable_smp_support(); + disable_ioapic_support(); return -1; } @@ -1089,21 +1021,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) preempt_disable(); smp_cpu_index_default(); - memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info)); - cpumask_copy(cpu_callin_mask, cpumask_of(0)); - mb(); + /* * Setup boot CPU information */ smp_store_cpu_info(0); /* Final full version of the data */ -#ifdef CONFIG_X86_32 - boot_cpu_logical_apicid = logical_smp_processor_id(); -#endif + cpumask_copy(cpu_callin_mask, cpumask_of(0)); + mb(); + current_thread_info()->cpu = 0; /* needed? */ for_each_possible_cpu(i) { zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); - zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); + zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); } set_cpu_sibling_map(0); @@ -1139,8 +1069,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) bsp_end_local_APIC_setup(); - map_cpu_to_logical_apicid(); - if (apic->setup_portio_remap) apic->setup_portio_remap(); diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index b35786dc9b8f..5f181742e8f9 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -340,3 +340,6 @@ ENTRY(sys_call_table) .long sys_fanotify_init .long sys_fanotify_mark .long sys_prlimit64 /* 340 */ + .long sys_name_to_handle_at + .long sys_open_by_handle_at + .long sys_clock_adjtime diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a375616d77f7..a91ae7709b49 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -2,39 +2,41 @@ #include <linux/memblock.h> #include <asm/trampoline.h> +#include <asm/cacheflush.h> #include <asm/pgtable.h> -#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) -#define __trampinit -#define __trampinitdata -#else -#define __trampinit __cpuinit -#define __trampinitdata __cpuinitdata -#endif +unsigned char *x86_trampoline_base; -/* ready for x86_64 and x86 */ -unsigned char *__trampinitdata trampoline_base; - -void __init reserve_trampoline_memory(void) +void __init setup_trampolines(void) { phys_addr_t mem; + size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); /* Has to be in very low memory so we can execute real-mode AP code. */ - mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); + mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); if (mem == MEMBLOCK_ERROR) panic("Cannot allocate trampoline\n"); - trampoline_base = __va(mem); - memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); + x86_trampoline_base = __va(mem); + memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); + + printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", + x86_trampoline_base, (unsigned long long)mem, size); + + memcpy(x86_trampoline_base, x86_trampoline_start, size); } /* - * Currently trivial. Write the real->protected mode - * bootstrap into the page concerned. 
The caller - * has made sure it's suitably aligned. + * setup_trampolines() gets called very early, to guarantee the + * availability of low memory. This is before the proper kernel page + * tables are set up, so we cannot set page permissions in that + * function. Thus, we use an arch_initcall instead. */ -unsigned long __trampinit setup_trampoline(void) +static int __init configure_trampolines(void) { - memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); - return virt_to_phys(trampoline_base); + size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); + + set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT); + return 0; } +arch_initcall(configure_trampolines); diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 8508237e8e43..451c0a7ef7fd 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S @@ -32,9 +32,11 @@ #include <asm/segment.h> #include <asm/page_types.h> -/* We can free up trampoline after bootup if cpu hotplug is not supported. */ -__CPUINITRODATA -.code16 +#ifdef CONFIG_SMP + + .section ".x86_trampoline","a" + .balign PAGE_SIZE + .code16 ENTRY(trampoline_data) r_base = . @@ -44,7 +46,7 @@ r_base = . cli # We should be safe anyway - movl $0xA5A5A5A5, trampoline_data - r_base + movl $0xA5A5A5A5, trampoline_status - r_base # write marker for master knows we're running /* GDT tables in non default location kernel can be beyond 16MB and @@ -72,5 +74,10 @@ boot_idt_descr: .word 0 # idt limit = 0 .long 0 # idt base = 0L +ENTRY(trampoline_status) + .long 0 + .globl trampoline_end trampoline_end: + +#endif /* CONFIG_SMP */ diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 075d130efcf9..09ff51799e96 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -32,13 +32,9 @@ #include <asm/segment.h> #include <asm/processor-flags.h> -#ifdef CONFIG_ACPI_SLEEP -.section .rodata, "a", @progbits -#else -/* We can free up the trampoline after bootup if cpu hotplug is not supported. */ -__CPUINITRODATA -#endif -.code16 + .section ".x86_trampoline","a" + .balign PAGE_SIZE + .code16 ENTRY(trampoline_data) r_base = . @@ -50,7 +46,7 @@ r_base = . mov %ax, %ss - movl $0xA5A5A5A5, trampoline_data - r_base + movl $0xA5A5A5A5, trampoline_status - r_base # write marker for master knows we're running # Setup stack @@ -64,10 +60,13 @@ r_base = . 
movzx %ax, %esi # Find the 32bit trampoline location shll $4, %esi - # Fixup the vectors - addl %esi, startup_32_vector - r_base - addl %esi, startup_64_vector - r_base - addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer + # Fixup the absolute vectors + leal (startup_32 - r_base)(%esi), %eax + movl %eax, startup_32_vector - r_base + leal (startup_64 - r_base)(%esi), %eax + movl %eax, startup_64_vector - r_base + leal (tgdt - r_base)(%esi), %eax + movl %eax, (tgdt + 2 - r_base) /* * GDT tables in non default location kernel can be beyond 16MB and @@ -129,6 +128,7 @@ no_longmode: jmp no_longmode #include "verify_cpu.S" + .balign 4 # Careful these need to be in the same 64K segment as the above; tidt: .word 0 # idt limit = 0 @@ -156,6 +156,10 @@ startup_64_vector: .long startup_64 - r_base .word __KERNEL_CS, 0 + .balign 4 +ENTRY(trampoline_status) + .long 0 + trampoline_stack: .org 0x1000 trampoline_stack_end: diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bf4700755184..624a2016198e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -105,6 +105,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + ENTRY_TEXT IRQENTRY_TEXT *(.fixup) *(.gnu.warning) @@ -230,7 +231,7 @@ SECTIONS * output PHDR, so the next output section - .init.text - should * start another segment - init. */ - PERCPU_VADDR(0, :percpu) + PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) #endif INIT_TEXT_SECTION(PAGE_SIZE) @@ -240,6 +241,18 @@ SECTIONS INIT_DATA_SECTION(16) + /* + * Code and data for a variety of lowlevel trampolines, to be + * copied into base memory (< 1 MiB) during initialization. + * Since it is copied early, the main copy can be discarded + * afterwards. + */ + .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) { + x86_trampoline_start = .; + *(.x86_trampoline) + x86_trampoline_end = .; + } + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; *(.x86_cpu_dev.init) @@ -291,6 +304,7 @@ SECTIONS *(.iommu_table) __iommu_table_end = .; } + . = ALIGN(8); /* * .exit.text is discard at runtime, not link time, to deal with @@ -305,7 +319,7 @@ SECTIONS } #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) - PERCPU(THREAD_SIZE) + PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE) #endif . 
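/*
 * Illustration only: the 64-bit trampoline above used to patch its far-jump
 * vectors with "addl %esi, vector", i.e. by incrementing whatever value was
 * assembled in.  The replacement recomputes each vector from scratch as
 * load_base + (symbol - r_base), which yields the same absolute address but
 * does not depend on the vector's initial contents, so the fixup is safe to
 * run on a fresh copy.  The struct, field names and sample offsets below
 * are ours.
 */
#include <assert.h>
#include <stdint.h>

struct far_pointer32 {
	uint32_t offset;	/* absolute 32-bit target, patched at runtime        */
	uint16_t selector;	/* kernel code segment selector, fixed at build time */
};

static uint32_t fixup_vector(uint32_t load_base, uint32_t sym_minus_rbase)
{
	return load_base + sym_minus_rbase;	/* leal (sym - r_base)(%esi), %eax */
}

int main(void)
{
	uint32_t segment = 0x9800;			/* hypothetical real-mode segment  */
	uint32_t load_base = segment << 4;		/* movzx + shll $4 in the stub     */
	struct far_pointer32 startup_32_vector = { 0, 0x10 };	/* selector value illustrative */

	startup_32_vector.offset = fixup_vector(load_base, 0x140);	/* hypothetical offset */
	assert(startup_32_vector.offset == 0x98140);
	/* running the fixup a second time gives the same result, unlike "addl" */
	startup_32_vector.offset = fixup_vector(load_base, 0x140);
	assert(startup_32_vector.offset == 0x98140);
	return 0;
}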
= ALIGN(PAGE_SIZE); diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 1b950d151e58..9796c2f3d074 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(empty_zero_page); #ifndef CONFIG_PARAVIRT diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index ceb2911aa439..c11514e9128b 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -70,6 +70,7 @@ struct x86_init_ops x86_init __initdata = { .setup_percpu_clockev = setup_boot_APIC_clock, .tsc_pre_init = x86_init_noop, .timer_init = hpet_time_init, + .wallclock_init = x86_init_noop, }, .iommu = { diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 1357d7cf4ec8..db932760ea82 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -62,21 +62,21 @@ TRACE_EVENT(kvm_hv_hypercall, TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), TP_STRUCT__entry( - __field( __u16, code ) - __field( bool, fast ) __field( __u16, rep_cnt ) __field( __u16, rep_idx ) __field( __u64, ingpa ) __field( __u64, outgpa ) + __field( __u16, code ) + __field( bool, fast ) ), TP_fast_assign( - __entry->code = code; - __entry->fast = fast; __entry->rep_cnt = rep_cnt; __entry->rep_idx = rep_idx; __entry->ingpa = ingpa; __entry->outgpa = outgpa; + __entry->code = code; + __entry->fast = fast; ), TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index eba687f0cc0c..b9ec1c74943c 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -847,7 +847,7 @@ static void __init lguest_init_IRQ(void) void lguest_setup_irq(unsigned int irq) { irq_alloc_desc_at(irq, 0); - set_irq_chip_and_handler_name(irq, &lguest_irq_controller, + irq_set_chip_and_handler_name(irq, &lguest_irq_controller, handle_level_irq, "level"); } @@ -995,7 +995,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) static void lguest_time_init(void) { /* Set up the timer interrupt (0) to go to our simple timer routine */ - set_irq_handler(0, lguest_time_irq); + irq_set_handler(0, lguest_time_irq); clocksource_register(&lguest_clock); diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index e10cf070ede0..f2479f19ddde 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -42,4 +42,5 @@ else lib-y += memmove_64.o memset_64.o lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o + lib-y += cmpxchg16b_emu.o endif diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 2cda60a06e65..e8e7e0d06f42 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -15,14 +15,12 @@ /* if you want SMP support, implement these with real spinlocks */ .macro LOCK reg - pushfl - CFI_ADJUST_CFA_OFFSET 4 + pushfl_cfi cli .endm .macro UNLOCK reg - popfl - CFI_ADJUST_CFA_OFFSET -4 + popfl_cfi .endm #define BEGIN(op) \ diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 71e080de3352..391a083674b4 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -14,14 +14,12 @@ #include <asm/dwarf2.h> .macro SAVE reg - pushl %\reg - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %\reg CFI_REL_OFFSET \reg, 0 .endm .macro RESTORE reg - popl %\reg - CFI_ADJUST_CFA_OFFSET 
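/*
 * Illustration only: the x86_init.c hunk above adds a wallclock_init hook
 * that defaults to x86_init_noop, the usual pattern for these platform ops
 * tables: every slot gets a harmless default and a platform overrides only
 * the hooks it cares about, so callers can invoke the hook unconditionally.
 * This is a generic standalone sketch of that pattern, not the kernel's
 * actual struct x86_init_ops layout.
 */
#include <stdio.h>

static void noop(void) { }

struct timer_init_ops {
	void (*timer_init)(void);
	void (*wallclock_init)(void);
};

static void default_timer_init(void)      { printf("default timer init\n"); }
static void platform_wallclock_init(void) { printf("platform wallclock init\n"); }

/* defaults: real work for timer_init, a no-op for wallclock_init */
static struct timer_init_ops timers = {
	.timer_init	= default_timer_init,
	.wallclock_init	= noop,
};

int main(void)
{
	/* a platform that actually has a wallclock to set up overrides the hook */
	timers.wallclock_init = platform_wallclock_init;

	timers.timer_init();
	timers.wallclock_init();	/* called unconditionally, like in setup_arch() */
	return 0;
}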
-4 + popl_cfi %\reg CFI_RESTORE \reg .endm diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index adbccd0bbb78..78d16a554db0 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -50,11 +50,9 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ ENTRY(csum_partial) CFI_STARTPROC - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len @@ -132,11 +130,9 @@ ENTRY(csum_partial) jz 8f roll $8, %eax 8: - popl %ebx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebx CFI_RESTORE ebx - popl %esi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %esi CFI_RESTORE esi ret CFI_ENDPROC @@ -148,11 +144,9 @@ ENDPROC(csum_partial) ENTRY(csum_partial) CFI_STARTPROC - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len @@ -260,11 +254,9 @@ ENTRY(csum_partial) jz 90f roll $8, %eax 90: - popl %ebx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebx CFI_RESTORE ebx - popl %esi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %esi CFI_RESTORE esi ret CFI_ENDPROC @@ -309,14 +301,11 @@ ENTRY(csum_partial_copy_generic) CFI_STARTPROC subl $4,%esp CFI_ADJUST_CFA_OFFSET 4 - pushl %edi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi CFI_REL_OFFSET edi, 0 - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+12(%esp),%ecx # len @@ -426,17 +415,13 @@ DST( movb %cl, (%edi) ) .previous - popl %ebx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebx CFI_RESTORE ebx - popl %esi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %esi CFI_RESTORE esi - popl %edi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edi CFI_RESTORE edi - popl %ecx # equivalent to addl $4,%esp - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ecx # equivalent to addl $4,%esp ret CFI_ENDPROC ENDPROC(csum_partial_copy_generic) @@ -459,14 +444,11 @@ ENDPROC(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic) CFI_STARTPROC - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 - pushl %edi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi CFI_REL_OFFSET edi, 0 - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst @@ -527,14 +509,11 @@ DST( movb %dl, (%edi) ) jmp 7b .previous - popl %esi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %esi CFI_RESTORE esi - popl %edi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edi CFI_RESTORE edi - popl %ebx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebx CFI_RESTORE ebx ret CFI_ENDPROC diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S new file mode 100644 index 000000000000..3e8b08a6de2b --- /dev/null +++ b/arch/x86/lib/cmpxchg16b_emu.S @@ -0,0 +1,59 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ * + */ +#include <linux/linkage.h> +#include <asm/alternative-asm.h> +#include <asm/frame.h> +#include <asm/dwarf2.h> + +.text + +/* + * Inputs: + * %rsi : memory location to compare + * %rax : low 64 bits of old value + * %rdx : high 64 bits of old value + * %rbx : low 64 bits of new value + * %rcx : high 64 bits of new value + * %al : Operation successful + */ +ENTRY(this_cpu_cmpxchg16b_emu) +CFI_STARTPROC + +# +# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not +# via the ZF. Caller will access %al to get result. +# +# Note that this is only useful for a cpuops operation. Meaning that we +# do *not* have a fully atomic operation but just an operation that is +# *atomic* on a single cpu (as provided by the this_cpu_xx class of +# macros). +# +this_cpu_cmpxchg16b_emu: + pushf + cli + + cmpq %gs:(%rsi), %rax + jne not_same + cmpq %gs:8(%rsi), %rdx + jne not_same + + movq %rbx, %gs:(%rsi) + movq %rcx, %gs:8(%rsi) + + popf + mov $1, %al + ret + + not_same: + popf + xor %al,%al + ret + +CFI_ENDPROC + +ENDPROC(this_cpu_cmpxchg16b_emu) diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S new file mode 100644 index 000000000000..0ecb8433e5a8 --- /dev/null +++ b/arch/x86/lib/memmove_64.S @@ -0,0 +1,197 @@ +/* + * Normally compiler builtins are used, but sometimes the compiler calls out + * of line code. Based on asm-i386/string.h. + * + * This assembly file is re-written from memmove_64.c file. + * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> + */ +#define _STRING_C +#include <linux/linkage.h> +#include <asm/dwarf2.h> + +#undef memmove + +/* + * Implement memmove(). This can handle overlap between src and dst. + * + * Input: + * rdi: dest + * rsi: src + * rdx: count + * + * Output: + * rax: dest + */ +ENTRY(memmove) + CFI_STARTPROC + /* Handle more 32bytes in loop */ + mov %rdi, %rax + cmp $0x20, %rdx + jb 1f + + /* Decide forward/backward copy mode */ + cmp %rdi, %rsi + jb 2f + + /* + * movsq instruction have many startup latency + * so we handle small size by general register. + */ + cmp $680, %rdx + jb 3f + /* + * movsq instruction is only good for aligned case. + */ + + cmpb %dil, %sil + je 4f +3: + sub $0x20, %rdx + /* + * We gobble 32byts forward in each loop. + */ +5: + sub $0x20, %rdx + movq 0*8(%rsi), %r11 + movq 1*8(%rsi), %r10 + movq 2*8(%rsi), %r9 + movq 3*8(%rsi), %r8 + leaq 4*8(%rsi), %rsi + + movq %r11, 0*8(%rdi) + movq %r10, 1*8(%rdi) + movq %r9, 2*8(%rdi) + movq %r8, 3*8(%rdi) + leaq 4*8(%rdi), %rdi + jae 5b + addq $0x20, %rdx + jmp 1f + /* + * Handle data forward by movsq. + */ + .p2align 4 +4: + movq %rdx, %rcx + movq -8(%rsi, %rdx), %r11 + lea -8(%rdi, %rdx), %r10 + shrq $3, %rcx + rep movsq + movq %r11, (%r10) + jmp 13f + /* + * Handle data backward by movsq. + */ + .p2align 4 +7: + movq %rdx, %rcx + movq (%rsi), %r11 + movq %rdi, %r10 + leaq -8(%rsi, %rdx), %rsi + leaq -8(%rdi, %rdx), %rdi + shrq $3, %rcx + std + rep movsq + cld + movq %r11, (%r10) + jmp 13f + + /* + * Start to prepare for backward copy. + */ + .p2align 4 +2: + cmp $680, %rdx + jb 6f + cmp %dil, %sil + je 7b +6: + /* + * Calculate copy position to tail. + */ + addq %rdx, %rsi + addq %rdx, %rdi + subq $0x20, %rdx + /* + * We gobble 32byts backward in each loop. 
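/*
 * Illustration only: the new cmpxchg16b_emu.S above implements a 16-byte
 * compare-and-exchange on a per-cpu location by disabling interrupts and
 * doing two 8-byte compares and two 8-byte stores, reporting success in
 * %al rather than via ZF.  That is atomic with respect to the local CPU
 * only, which is all the this_cpu_* users need.  The plain C sketch below
 * mirrors just the compare/store/flag logic; it is not atomic at all and
 * stands in for neither the asm nor a real cmpxchg16b instruction.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct u128_pair {
	uint64_t lo;	/* what the asm keeps in %rax / %rbx */
	uint64_t hi;	/* what the asm keeps in %rdx / %rcx */
};

static bool cmpxchg_double_sketch(struct u128_pair *mem,
				  const struct u128_pair *old,
				  const struct u128_pair *new)
{
	/* the asm brackets this whole block with pushf; cli ... popf */
	if (mem->lo != old->lo || mem->hi != old->hi)
		return false;		/* xor %al,%al */
	mem->lo = new->lo;
	mem->hi = new->hi;
	return true;			/* mov $1,%al  */
}

int main(void)
{
	struct u128_pair slot = { 1, 2 };
	struct u128_pair expect = { 1, 2 }, update = { 3, 4 };

	assert(cmpxchg_double_sketch(&slot, &expect, &update));
	assert(slot.lo == 3 && slot.hi == 4);
	assert(!cmpxchg_double_sketch(&slot, &expect, &update));	/* stale old value */
	return 0;
}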
+ */ +8: + subq $0x20, %rdx + movq -1*8(%rsi), %r11 + movq -2*8(%rsi), %r10 + movq -3*8(%rsi), %r9 + movq -4*8(%rsi), %r8 + leaq -4*8(%rsi), %rsi + + movq %r11, -1*8(%rdi) + movq %r10, -2*8(%rdi) + movq %r9, -3*8(%rdi) + movq %r8, -4*8(%rdi) + leaq -4*8(%rdi), %rdi + jae 8b + /* + * Calculate copy position to head. + */ + addq $0x20, %rdx + subq %rdx, %rsi + subq %rdx, %rdi +1: + cmpq $16, %rdx + jb 9f + /* + * Move data from 16 bytes to 31 bytes. + */ + movq 0*8(%rsi), %r11 + movq 1*8(%rsi), %r10 + movq -2*8(%rsi, %rdx), %r9 + movq -1*8(%rsi, %rdx), %r8 + movq %r11, 0*8(%rdi) + movq %r10, 1*8(%rdi) + movq %r9, -2*8(%rdi, %rdx) + movq %r8, -1*8(%rdi, %rdx) + jmp 13f + .p2align 4 +9: + cmpq $8, %rdx + jb 10f + /* + * Move data from 8 bytes to 15 bytes. + */ + movq 0*8(%rsi), %r11 + movq -1*8(%rsi, %rdx), %r10 + movq %r11, 0*8(%rdi) + movq %r10, -1*8(%rdi, %rdx) + jmp 13f +10: + cmpq $4, %rdx + jb 11f + /* + * Move data from 4 bytes to 7 bytes. + */ + movl (%rsi), %r11d + movl -4(%rsi, %rdx), %r10d + movl %r11d, (%rdi) + movl %r10d, -4(%rdi, %rdx) + jmp 13f +11: + cmp $2, %rdx + jb 12f + /* + * Move data from 2 bytes to 3 bytes. + */ + movw (%rsi), %r11w + movw -2(%rsi, %rdx), %r10w + movw %r11w, (%rdi) + movw %r10w, -2(%rdi, %rdx) + jmp 13f +12: + cmp $1, %rdx + jb 13f + /* + * Move data for 1 byte. + */ + movb (%rsi), %r11b + movb %r11b, (%rdi) +13: + retq + CFI_ENDPROC +ENDPROC(memmove) diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c deleted file mode 100644 index 6d0f0ec41b34..000000000000 --- a/arch/x86/lib/memmove_64.c +++ /dev/null @@ -1,192 +0,0 @@ -/* Normally compiler builtins are used, but sometimes the compiler calls out - of line code. Based on asm-i386/string.h. - */ -#define _STRING_C -#include <linux/string.h> -#include <linux/module.h> - -#undef memmove -void *memmove(void *dest, const void *src, size_t count) -{ - unsigned long d0,d1,d2,d3,d4,d5,d6,d7; - char *ret; - - __asm__ __volatile__( - /* Handle more 32bytes in loop */ - "mov %2, %3\n\t" - "cmp $0x20, %0\n\t" - "jb 1f\n\t" - - /* Decide forward/backward copy mode */ - "cmp %2, %1\n\t" - "jb 2f\n\t" - - /* - * movsq instruction have many startup latency - * so we handle small size by general register. - */ - "cmp $680, %0\n\t" - "jb 3f\n\t" - /* - * movsq instruction is only good for aligned case. - */ - "cmpb %%dil, %%sil\n\t" - "je 4f\n\t" - "3:\n\t" - "sub $0x20, %0\n\t" - /* - * We gobble 32byts forward in each loop. - */ - "5:\n\t" - "sub $0x20, %0\n\t" - "movq 0*8(%1), %4\n\t" - "movq 1*8(%1), %5\n\t" - "movq 2*8(%1), %6\n\t" - "movq 3*8(%1), %7\n\t" - "leaq 4*8(%1), %1\n\t" - - "movq %4, 0*8(%2)\n\t" - "movq %5, 1*8(%2)\n\t" - "movq %6, 2*8(%2)\n\t" - "movq %7, 3*8(%2)\n\t" - "leaq 4*8(%2), %2\n\t" - "jae 5b\n\t" - "addq $0x20, %0\n\t" - "jmp 1f\n\t" - /* - * Handle data forward by movsq. - */ - ".p2align 4\n\t" - "4:\n\t" - "movq %0, %8\n\t" - "movq -8(%1, %0), %4\n\t" - "lea -8(%2, %0), %5\n\t" - "shrq $3, %8\n\t" - "rep movsq\n\t" - "movq %4, (%5)\n\t" - "jmp 13f\n\t" - /* - * Handle data backward by movsq. - */ - ".p2align 4\n\t" - "7:\n\t" - "movq %0, %8\n\t" - "movq (%1), %4\n\t" - "movq %2, %5\n\t" - "leaq -8(%1, %0), %1\n\t" - "leaq -8(%2, %0), %2\n\t" - "shrq $3, %8\n\t" - "std\n\t" - "rep movsq\n\t" - "cld\n\t" - "movq %4, (%5)\n\t" - "jmp 13f\n\t" - - /* - * Start to prepare for backward copy. - */ - ".p2align 4\n\t" - "2:\n\t" - "cmp $680, %0\n\t" - "jb 6f \n\t" - "cmp %%dil, %%sil\n\t" - "je 7b \n\t" - "6:\n\t" - /* - * Calculate copy position to tail. 
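/*
 * Illustration only: stripped of its 32-byte unrolled loops, the rep-movsq
 * fast path and the 16/8/4/2/1-byte tail cases, the new memmove_64.S above
 * comes down to the classic overlap rule: copy forward when the destination
 * starts below the source, copy backward otherwise.  This byte-at-a-time
 * sketch shows only that rule; the real routine exists precisely because
 * such a naive loop is too slow.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static void *memmove_sketch(void *dst, const void *src, size_t count)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d < s) {
		for (size_t i = 0; i < count; i++)	/* forward copy  */
			d[i] = s[i];
	} else if (d > s) {
		for (size_t i = count; i-- > 0; )	/* backward copy */
			d[i] = s[i];
	}
	return dst;	/* like the asm, the destination is the return value */
}

int main(void)
{
	char buf[] = "abcdefgh";

	memmove_sketch(buf + 2, buf, 6);	/* overlapping: needs the backward path */
	assert(memcmp(buf, "ababcdef", 8) == 0);
	return 0;
}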
- */ - "addq %0, %1\n\t" - "addq %0, %2\n\t" - "subq $0x20, %0\n\t" - /* - * We gobble 32byts backward in each loop. - */ - "8:\n\t" - "subq $0x20, %0\n\t" - "movq -1*8(%1), %4\n\t" - "movq -2*8(%1), %5\n\t" - "movq -3*8(%1), %6\n\t" - "movq -4*8(%1), %7\n\t" - "leaq -4*8(%1), %1\n\t" - - "movq %4, -1*8(%2)\n\t" - "movq %5, -2*8(%2)\n\t" - "movq %6, -3*8(%2)\n\t" - "movq %7, -4*8(%2)\n\t" - "leaq -4*8(%2), %2\n\t" - "jae 8b\n\t" - /* - * Calculate copy position to head. - */ - "addq $0x20, %0\n\t" - "subq %0, %1\n\t" - "subq %0, %2\n\t" - "1:\n\t" - "cmpq $16, %0\n\t" - "jb 9f\n\t" - /* - * Move data from 16 bytes to 31 bytes. - */ - "movq 0*8(%1), %4\n\t" - "movq 1*8(%1), %5\n\t" - "movq -2*8(%1, %0), %6\n\t" - "movq -1*8(%1, %0), %7\n\t" - "movq %4, 0*8(%2)\n\t" - "movq %5, 1*8(%2)\n\t" - "movq %6, -2*8(%2, %0)\n\t" - "movq %7, -1*8(%2, %0)\n\t" - "jmp 13f\n\t" - ".p2align 4\n\t" - "9:\n\t" - "cmpq $8, %0\n\t" - "jb 10f\n\t" - /* - * Move data from 8 bytes to 15 bytes. - */ - "movq 0*8(%1), %4\n\t" - "movq -1*8(%1, %0), %5\n\t" - "movq %4, 0*8(%2)\n\t" - "movq %5, -1*8(%2, %0)\n\t" - "jmp 13f\n\t" - "10:\n\t" - "cmpq $4, %0\n\t" - "jb 11f\n\t" - /* - * Move data from 4 bytes to 7 bytes. - */ - "movl (%1), %4d\n\t" - "movl -4(%1, %0), %5d\n\t" - "movl %4d, (%2)\n\t" - "movl %5d, -4(%2, %0)\n\t" - "jmp 13f\n\t" - "11:\n\t" - "cmp $2, %0\n\t" - "jb 12f\n\t" - /* - * Move data from 2 bytes to 3 bytes. - */ - "movw (%1), %4w\n\t" - "movw -2(%1, %0), %5w\n\t" - "movw %4w, (%2)\n\t" - "movw %5w, -2(%2, %0)\n\t" - "jmp 13f\n\t" - "12:\n\t" - "cmp $1, %0\n\t" - "jb 13f\n\t" - /* - * Move data for 1 byte. - */ - "movb (%1), %4b\n\t" - "movb %4b, (%2)\n\t" - "13:\n\t" - : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) , - "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7) - :"0" (count), - "1" (src), - "2" (dest) - :"memory"); - - return ret; - -} -EXPORT_SYMBOL(memmove); diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S index 41fcf00e49df..67743977398b 100644 --- a/arch/x86/lib/rwsem_64.S +++ b/arch/x86/lib/rwsem_64.S @@ -23,43 +23,50 @@ #include <asm/dwarf2.h> #define save_common_regs \ - pushq %rdi; \ - pushq %rsi; \ - pushq %rcx; \ - pushq %r8; \ - pushq %r9; \ - pushq %r10; \ - pushq %r11 + pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ + pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ + pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \ + pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \ + pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \ + pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \ + pushq_cfi %r11; CFI_REL_OFFSET r11, 0 #define restore_common_regs \ - popq %r11; \ - popq %r10; \ - popq %r9; \ - popq %r8; \ - popq %rcx; \ - popq %rsi; \ - popq %rdi + popq_cfi %r11; CFI_RESTORE r11; \ + popq_cfi %r10; CFI_RESTORE r10; \ + popq_cfi %r9; CFI_RESTORE r9; \ + popq_cfi %r8; CFI_RESTORE r8; \ + popq_cfi %rcx; CFI_RESTORE rcx; \ + popq_cfi %rsi; CFI_RESTORE rsi; \ + popq_cfi %rdi; CFI_RESTORE rdi /* Fix up special calling conventions */ ENTRY(call_rwsem_down_read_failed) + CFI_STARTPROC save_common_regs - pushq %rdx + pushq_cfi %rdx + CFI_REL_OFFSET rdx, 0 movq %rax,%rdi call rwsem_down_read_failed - popq %rdx + popq_cfi %rdx + CFI_RESTORE rdx restore_common_regs ret - ENDPROC(call_rwsem_down_read_failed) + CFI_ENDPROC +ENDPROC(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_write_failed) + CFI_STARTPROC save_common_regs movq %rax,%rdi call rwsem_down_write_failed restore_common_regs ret - ENDPROC(call_rwsem_down_write_failed) + CFI_ENDPROC +ENDPROC(call_rwsem_down_write_failed) ENTRY(call_rwsem_wake) + CFI_STARTPROC decl %edx 
/* do nothing if still outstanding active readers */ jnz 1f save_common_regs @@ -67,15 +74,20 @@ ENTRY(call_rwsem_wake) call rwsem_wake restore_common_regs 1: ret - ENDPROC(call_rwsem_wake) + CFI_ENDPROC +ENDPROC(call_rwsem_wake) /* Fix up special calling conventions */ ENTRY(call_rwsem_downgrade_wake) + CFI_STARTPROC save_common_regs - pushq %rdx + pushq_cfi %rdx + CFI_REL_OFFSET rdx, 0 movq %rax,%rdi call rwsem_downgrade_wake - popq %rdx + popq_cfi %rdx + CFI_RESTORE rdx restore_common_regs ret - ENDPROC(call_rwsem_downgrade_wake) + CFI_ENDPROC +ENDPROC(call_rwsem_downgrade_wake) diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 648fe4741782..06691daa4108 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S @@ -36,7 +36,7 @@ */ #ifdef CONFIG_SMP ENTRY(__write_lock_failed) - CFI_STARTPROC simple + CFI_STARTPROC FRAME 2: LOCK_PREFIX addl $ RW_LOCK_BIAS,(%eax) @@ -74,29 +74,23 @@ ENTRY(__read_lock_failed) /* Fix up special calling conventions */ ENTRY(call_rwsem_down_read_failed) CFI_STARTPROC - push %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx,0 - push %edx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edx CFI_REL_OFFSET edx,0 call rwsem_down_read_failed - pop %edx - CFI_ADJUST_CFA_OFFSET -4 - pop %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edx + popl_cfi %ecx ret CFI_ENDPROC ENDPROC(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_write_failed) CFI_STARTPROC - push %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx,0 calll rwsem_down_write_failed - pop %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ecx ret CFI_ENDPROC ENDPROC(call_rwsem_down_write_failed) @@ -105,12 +99,10 @@ ENTRY(call_rwsem_wake) CFI_STARTPROC decw %dx /* do nothing if still outstanding active readers */ jnz 1f - push %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx,0 call rwsem_wake - pop %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ecx 1: ret CFI_ENDPROC ENDPROC(call_rwsem_wake) @@ -118,17 +110,13 @@ ENTRY(call_rwsem_wake) /* Fix up special calling conventions */ ENTRY(call_rwsem_downgrade_wake) CFI_STARTPROC - push %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx,0 - push %edx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edx CFI_REL_OFFSET edx,0 call rwsem_downgrade_wake - pop %edx - CFI_ADJUST_CFA_OFFSET -4 - pop %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edx + popl_cfi %ecx ret CFI_ENDPROC ENDPROC(call_rwsem_downgrade_wake) diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S index 650b11e00ecc..2930ae05d773 100644 --- a/arch/x86/lib/thunk_32.S +++ b/arch/x86/lib/thunk_32.S @@ -7,24 +7,6 @@ #include <linux/linkage.h> -#define ARCH_TRACE_IRQS_ON \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_on; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -#define ARCH_TRACE_IRQS_OFF \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_off; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - #ifdef CONFIG_TRACE_IRQFLAGS /* put return address in eax (arg1) */ .macro thunk_ra name,func diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index bf9a7d5a5428..782b082c9ff7 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -22,26 +22,6 @@ CFI_ENDPROC .endm - /* rdi: arg1 ... normal C conventions. rax is passed from C. 
*/ - .macro thunk_retrax name,func - .globl \name -\name: - CFI_STARTPROC - SAVE_ARGS - call \func - jmp restore_norax - CFI_ENDPROC - .endm - - - .section .sched.text, "ax" -#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM - thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed - thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed - thunk rwsem_wake_thunk,rwsem_wake - thunk rwsem_downgrade_thunk,rwsem_downgrade_wake -#endif - #ifdef CONFIG_TRACE_IRQFLAGS /* put return address in rdi (arg1) */ .macro thunk_ra name,func @@ -72,10 +52,3 @@ restore: RESTORE_ARGS ret CFI_ENDPROC - - CFI_STARTPROC - SAVE_ARGS -restore_norax: - RESTORE_ARGS 1 - ret - CFI_ENDPROC diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 09df2f9a3d69..3e608edf9958 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index f21962c435ed..0919c26820d4 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c @@ -26,9 +26,7 @@ #include <asm/apic.h> #include <asm/amd_nb.h> -static struct bootnode __initdata nodes[8]; static unsigned char __initdata nodeids[8]; -static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; static __init int find_northbridge(void) { @@ -51,7 +49,7 @@ static __init int find_northbridge(void) return num; } - return -1; + return -ENOENT; } static __init void early_get_boot_cpu_id(void) @@ -69,17 +67,18 @@ static __init void early_get_boot_cpu_id(void) #endif } -int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) +int __init amd_numa_init(void) { - unsigned long start = PFN_PHYS(start_pfn); - unsigned long end = PFN_PHYS(end_pfn); + unsigned long start = PFN_PHYS(0); + unsigned long end = PFN_PHYS(max_pfn); unsigned numnodes; unsigned long prevbase; - int i, nb, found = 0; + int i, j, nb; u32 nodeid, reg; + unsigned int bits, cores, apicid_base; if (!early_pci_allowed()) - return -1; + return -EINVAL; nb = find_northbridge(); if (nb < 0) @@ -90,7 +89,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) reg = read_pci_config(0, nb, 0, 0x60); numnodes = ((reg >> 4) & 0xF) + 1; if (numnodes <= 1) - return -1; + return -ENOENT; pr_info("Number of physical nodes %d\n", numnodes); @@ -121,9 +120,9 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) if ((base >> 8) & 3 || (limit >> 8) & 3) { pr_err("Node %d using interleaving mode %lx/%lx\n", nodeid, (base >> 8) & 3, (limit >> 8) & 3); - return -1; + return -EINVAL; } - if (node_isset(nodeid, nodes_parsed)) { + if (node_isset(nodeid, numa_nodes_parsed)) { pr_info("Node %d already present, skipping\n", nodeid); continue; @@ -160,117 +159,28 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) if (prevbase > base) { pr_err("Node map not sorted %lx,%lx\n", prevbase, base); - return -1; + return -EINVAL; } pr_info("Node %d MemBase %016lx Limit %016lx\n", nodeid, base, limit); - found++; - - nodes[nodeid].start = base; - nodes[nodeid].end = limit; - prevbase = base; - - node_set(nodeid, nodes_parsed); - } - - if (!found) - return -1; - return 0; -} - -#ifdef CONFIG_NUMA_EMU -static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { - [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; - -void __init amd_get_nodes(struct bootnode *physnodes) -{ - int i; - - for_each_node_mask(i, nodes_parsed) { - physnodes[i].start = nodes[i].start; - physnodes[i].end = nodes[i].end; + numa_add_memblk(nodeid, base, limit); + node_set(nodeid, numa_nodes_parsed); } -} - -static int __init find_node_by_addr(unsigned long addr) -{ - int ret = NUMA_NO_NODE; - int i; - - for (i = 0; i < 8; i++) - if (addr >= nodes[i].start && addr < nodes[i].end) { - ret = i; - break; - } - return ret; -} -/* - * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be - * setup to represent the physical topology but reflect the emulated - * environment. For each emulated node, the real node which it appears on is - * found and a fake pxm to nid mapping is created which mirrors the actual - * locality. node_distance() then represents the correct distances between - * emulated nodes by using the fake acpi mappings to pxms. - */ -void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) -{ - unsigned int bits; - unsigned int cores; - unsigned int apicid_base = 0; - int i; + if (!nodes_weight(numa_nodes_parsed)) + return -ENOENT; + /* + * We seem to have valid NUMA configuration. Map apicids to nodes + * using the coreid bits from early_identify_cpu. + */ bits = boot_cpu_data.x86_coreid_bits; cores = 1 << bits; - early_get_boot_cpu_id(); - if (boot_cpu_physical_apicid > 0) - apicid_base = boot_cpu_physical_apicid; - - for (i = 0; i < nr_nodes; i++) { - int index; - int nid; - int j; - - nid = find_node_by_addr(nodes[i].start); - if (nid == NUMA_NO_NODE) - continue; - - index = nodeids[nid] << bits; - if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE) - for (j = apicid_base; j < cores + apicid_base; j++) - fake_apicid_to_node[index + j] = i; -#ifdef CONFIG_ACPI_NUMA - __acpi_map_pxm_to_node(nid, i); -#endif - } - memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); -} -#endif /* CONFIG_NUMA_EMU */ - -int __init amd_scan_nodes(void) -{ - unsigned int bits; - unsigned int cores; - unsigned int apicid_base; - int i; - - BUG_ON(nodes_empty(nodes_parsed)); - node_possible_map = nodes_parsed; - memnode_shift = compute_hash_shift(nodes, 8, NULL); - if (memnode_shift < 0) { - pr_err("No NUMA node hash function found. 
Contact maintainer\n"); - return -1; - } - pr_info("Using node hash shift of %d\n", memnode_shift); - - /* use the coreid bits from early_identify_cpu */ - bits = boot_cpu_data.x86_coreid_bits; - cores = (1<<bits); apicid_base = 0; + /* get the APIC ID of the BSP early for systems with apicid lifting */ early_get_boot_cpu_id(); if (boot_cpu_physical_apicid > 0) { @@ -278,17 +188,9 @@ int __init amd_scan_nodes(void) apicid_base = boot_cpu_physical_apicid; } - for_each_node_mask(i, node_possible_map) { - int j; - - memblock_x86_register_active_regions(i, - nodes[i].start >> PAGE_SHIFT, - nodes[i].end >> PAGE_SHIFT); + for_each_node_mask(i, numa_nodes_parsed) for (j = apicid_base; j < cores + apicid_base; j++) - apicid_to_node[(i << bits) + j] = i; - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } + set_apicid_to_node((i << bits) + j, i); - numa_init_array(); return 0; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7d90ceb882a4..20e3f8702d1e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -229,15 +229,14 @@ void vmalloc_sync_all(void) for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE && address < FIXADDR_TOP; address += PMD_SIZE) { - - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { spinlock_t *pgt_lock; pmd_t *ret; + /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); @@ -247,7 +246,7 @@ void vmalloc_sync_all(void) if (!ret) break; } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } } @@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { if (fault & VM_FAULT_OOM) { + /* Kernel mode? Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address); + return; + } + out_of_memory(regs, error_code, address); } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 947f42abe820..286d289b039b 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -18,9 +18,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -unsigned long __initdata e820_table_start; -unsigned long __meminitdata e820_table_end; -unsigned long __meminitdata e820_table_top; +unsigned long __initdata pgt_buf_start; +unsigned long __meminitdata pgt_buf_end; +unsigned long __meminitdata pgt_buf_top; int after_bootmem; @@ -33,7 +33,7 @@ int direct_gbpages static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { - unsigned long puds, pmds, ptes, tables, start; + unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; phys_addr_t base; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; @@ -65,29 +65,20 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #ifdef CONFIG_X86_32 /* for fixmap */ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); -#endif - /* - * RED-PEN putting page tables only on node 0 could - * cause a hotspot and fill up ZONE_DMA. The page tables - * need roughly 0.5KB per GB. 
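/*
 * Illustration only: the rewritten amd_numa_init() above no longer keeps
 * private node arrays; it registers each node's memory range and then maps
 * APIC IDs to nodes using the core-ID bit width, so every APIC ID whose
 * upper bits select node i points at node i.  The sketch below redoes that
 * (node << coreid_bits) + core arithmetic with hypothetical numbers;
 * set_apicid_to_node() is replaced by a plain array.
 */
#include <assert.h>

#define MAX_APICID	64	/* plenty for this example */

static int apicid_to_node_sketch[MAX_APICID];

static void map_apicids(int nr_nodes, int coreid_bits, int apicid_base)
{
	int cores = 1 << coreid_bits;

	for (int node = 0; node < nr_nodes; node++)
		for (int j = apicid_base; j < cores + apicid_base; j++)
			apicid_to_node_sketch[(node << coreid_bits) + j] = node;
}

int main(void)
{
	/* hypothetical 2-node box, 4 cores per node, no APIC ID lifting */
	map_apicids(2, 2, 0);

	assert(apicid_to_node_sketch[0] == 0);	/* node 0, core 0 */
	assert(apicid_to_node_sketch[3] == 0);	/* node 0, core 3 */
	assert(apicid_to_node_sketch[4] == 1);	/* node 1, core 0 */
	assert(apicid_to_node_sketch[7] == 1);	/* node 1, core 3 */
	return 0;
}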
- */ -#ifdef CONFIG_X86_32 - start = 0x7000; -#else - start = 0x8000; + good_end = max_pfn_mapped << PAGE_SHIFT; #endif - base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT, - tables, PAGE_SIZE); + + base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); if (base == MEMBLOCK_ERROR) panic("Cannot find space for the kernel page tables"); - e820_table_start = base >> PAGE_SHIFT; - e820_table_end = e820_table_start; - e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); + pgt_buf_start = base >> PAGE_SHIFT; + pgt_buf_end = pgt_buf_start; + pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT); + end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); } struct map_range { @@ -279,30 +270,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, load_cr3(swapper_pg_dir); #endif -#ifdef CONFIG_X86_64 - if (!after_bootmem && !start) { - pud_t *pud; - pmd_t *pmd; - - mmu_cr4_features = read_cr4(); - - /* - * _brk_end cannot change anymore, but it and _end may be - * located on different 2M pages. cleanup_highmap(), however, - * can only consider _end when it runs, so destroy any - * mappings beyond _brk_end here. - */ - pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); - pmd = pmd_offset(pud, _brk_end - 1); - while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) - pmd_clear(pmd); - } -#endif __flush_tlb_all(); - if (!after_bootmem && e820_table_end > e820_table_start) - memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, - e820_table_end << PAGE_SHIFT, "PGTABLE"); + if (!after_bootmem && pgt_buf_end > pgt_buf_start) + memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, + pgt_buf_end << PAGE_SHIFT, "PGTABLE"); if (!after_bootmem) early_memtest(start, end); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c821074b7f0b..73ad7ebd6e9c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { - unsigned long pfn = e820_table_end++; + unsigned long pfn = pgt_buf_end++; void *adr; - if (pfn >= e820_table_top) + if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); @@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end - && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start - || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) { + && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start + || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; @@ -644,8 +644,7 @@ void __init find_low_pfn_range(void) } #ifndef CONFIG_NEED_MULTIPLE_NODES -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 71a59296af80..0aa34669ed3f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -51,6 +51,7 @@ #include <asm/numa.h> #include <asm/cacheflush.h> #include <asm/init.h> +#include <asm/uv/uv.h> static int __init parse_direct_gbpages_off(char *arg) { @@ -105,18 +106,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) for (address = start; address <= end; address += 
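/*
 * Illustration only: find_early_table_space() above sizes the pgt_buf_*
 * window by counting how many PUD/PMD/PTE entries are needed to map memory
 * up to "end" and rounding each level up to whole pages.  The sketch below
 * redoes that arithmetic for the plain 4 KiB-page case on x86-64 (8-byte
 * entries, PUD = 1 GiB, PMD = 2 MiB, PTE = 4 KiB); the use_pse/use_gbpages
 * variants in the real code simply skip the lower levels for ranges covered
 * by large pages.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define ENTRY_SIZE	8ULL

static uint64_t level_bytes(uint64_t end, int shift)
{
	uint64_t entries = (end + (1ULL << shift) - 1) >> shift;
	uint64_t bytes = entries * ENTRY_SIZE;

	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* round up to a page */
}

int main(void)
{
	uint64_t end = 4ULL << 30;	/* hypothetical: map the first 4 GiB */
	uint64_t tables = level_bytes(end, PUD_SHIFT) +
			  level_bytes(end, PMD_SHIFT) +
			  level_bytes(end, PAGE_SHIFT);

	/* 4 PUDs -> 1 page, 2048 PMDs -> 4 pages, 1M PTEs -> 2048 pages */
	assert(tables == (1 + 4 + 2048) * PAGE_SIZE);
	printf("about %llu KiB of page tables for 4 GiB of 4K mappings\n",
	       (unsigned long long)(tables >> 10));
	return 0;
}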
PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); - unsigned long flags; struct page *page; if (pgd_none(*pgd_ref)) continue; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; pgd = (pgd_t *)page_address(page) + pgd_index(address); + /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); @@ -128,7 +129,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_unlock(pgt_lock); } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } } @@ -314,7 +315,7 @@ void __init cleanup_highmap(void) static __ref void *alloc_low_page(unsigned long *phys) { - unsigned long pfn = e820_table_end++; + unsigned long pfn = pgt_buf_end++; void *adr; if (after_bootmem) { @@ -324,7 +325,7 @@ static __ref void *alloc_low_page(unsigned long *phys) return adr; } - if (pfn >= e820_table_top) + if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); @@ -333,12 +334,28 @@ static __ref void *alloc_low_page(unsigned long *phys) return adr; } +static __ref void *map_low_page(void *virt) +{ + void *adr; + unsigned long phys, left; + + if (after_bootmem) + return virt; + + phys = __pa(virt); + left = phys & (PAGE_SIZE - 1); + adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); + adr = (void *)(((unsigned long)adr) | left); + + return adr; +} + static __ref void unmap_low_page(void *adr) { if (after_bootmem) return; - early_iounmap(adr, PAGE_SIZE); + early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE); } static unsigned long __meminit @@ -386,15 +403,6 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, } static unsigned long __meminit -phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end, - pgprot_t prot) -{ - pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd); - - return phys_pte_init(pte, address, end, prot); -} - -static unsigned long __meminit phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, unsigned long page_size_mask, pgprot_t prot) { @@ -420,8 +428,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, if (pmd_val(*pmd)) { if (!pmd_large(*pmd)) { spin_lock(&init_mm.page_table_lock); - last_map_addr = phys_pte_update(pmd, address, + pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd)); + last_map_addr = phys_pte_init(pte, address, end, prot); + unmap_low_page(pte); spin_unlock(&init_mm.page_table_lock); continue; } @@ -468,18 +478,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, } static unsigned long __meminit -phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end, - unsigned long page_size_mask, pgprot_t prot) -{ - pmd_t *pmd = pmd_offset(pud, 0); - unsigned long last_map_addr; - - last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot); - __flush_tlb_all(); - return last_map_addr; -} - -static unsigned long __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, unsigned long page_size_mask) { @@ -504,8 +502,11 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, if (pud_val(*pud)) { if (!pud_large(*pud)) { - last_map_addr = phys_pmd_update(pud, addr, end, + pmd = map_low_page(pmd_offset(pud, 0)); + last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, prot); + unmap_low_page(pmd); + __flush_tlb_all(); continue; } /* @@ -553,17 +554,6 @@ phys_pud_init(pud_t *pud_page, unsigned 
long addr, unsigned long end, return last_map_addr; } -static unsigned long __meminit -phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end, - unsigned long page_size_mask) -{ - pud_t *pud; - - pud = (pud_t *)pgd_page_vaddr(*pgd); - - return phys_pud_init(pud, addr, end, page_size_mask); -} - unsigned long __meminit kernel_physical_mapping_init(unsigned long start, unsigned long end, @@ -587,8 +577,10 @@ kernel_physical_mapping_init(unsigned long start, next = end; if (pgd_val(*pgd)) { - last_map_addr = phys_pud_update(pgd, __pa(start), + pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd)); + last_map_addr = phys_pud_init(pud, __pa(start), __pa(end), page_size_mask); + unmap_low_page(pud); continue; } @@ -612,10 +604,9 @@ kernel_physical_mapping_init(unsigned long start, } #ifndef CONFIG_NUMA -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(void) { - memblock_x86_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, 0, max_pfn); } #endif @@ -908,6 +899,19 @@ const char *arch_vma_name(struct vm_area_struct *vma) return NULL; } +#ifdef CONFIG_X86_UV +#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) + +unsigned long memory_block_size_bytes(void) +{ + if (is_uv_system()) { + printk(KERN_INFO "UV: memory block size 2GB\n"); + return 2UL * 1024 * 1024 * 1024; + } + return MIN_MEMORY_BLOCK_SIZE; +} +#endif + #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Initialise the sparsemem vmemmap using huge-pages at the PMD level. diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index ebf6d7887a38..9559d360fde7 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -26,12 +26,50 @@ static __init int numa_setup(char *opt) early_param("numa", numa_setup); /* - * Which logical CPUs are on which nodes + * apicid, cpu, node mappings */ +s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { + [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE +}; + cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* + * Map cpu index to node index + */ +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); + +void __cpuinit numa_set_node(int cpu, int node) +{ + int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); + + /* early setting, no percpu area yet */ + if (cpu_to_node_map) { + cpu_to_node_map[cpu] = node; + return; + } + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); + dump_stack(); + return; + } +#endif + per_cpu(x86_cpu_to_node_map, cpu) = node; + + if (node != NUMA_NO_NODE) + set_cpu_numa_node(cpu, node); +} + +void __cpuinit numa_clear_node(int cpu) +{ + numa_set_node(cpu, NUMA_NO_NODE); +} + +/* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * @@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void) pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); } -#ifdef CONFIG_DEBUG_PER_CPU_MAPS +/* + * There are unfortunately some poorly designed mainboards around that + * only connect memory to a single CPU. This breaks the 1:1 cpu->node + * mapping. To avoid this fill in the mapping for all possible CPUs, + * as the number of CPUs is not known yet. We round robin the existing + * nodes. 
+ */ +void __init numa_init_array(void) +{ + int rr, i; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + if (early_cpu_to_node(i) != NUMA_NO_NODE) + continue; + numa_set_node(i, rr); + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } +} + +static __init int find_near_online_node(int node) +{ + int n, val; + int min_val = INT_MAX; + int best_node = -1; + + for_each_online_node(n) { + val = node_distance(node, n); + + if (val < min_val) { + min_val = val; + best_node = n; + } + } + + return best_node; +} + +/* + * Setup early cpu_to_node. + * + * Populate cpu_to_node[] only if x86_cpu_to_apicid[], + * and apicid_to_node[] tables have valid entries for a CPU. + * This means we skip cpu_to_node[] initialisation for NUMA + * emulation and faking node case (when running a kernel compiled + * for NUMA on a non NUMA box), which is OK as cpu_to_node[] + * is already initialized in a round robin manner at numa_init_array, + * prior to this call, and this initialization is good enough + * for the fake NUMA cases. + * + * Called before the per_cpu areas are setup. + */ +void __init init_cpu_to_node(void) +{ + int cpu; + u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); + + BUG_ON(cpu_to_apicid == NULL); + + for_each_possible_cpu(cpu) { + int node = numa_cpu_node(cpu); + + if (node == NUMA_NO_NODE) + continue; + if (!node_online(node)) + node = find_near_online_node(node); + numa_set_node(cpu, node); + } +} + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS + +# ifndef CONFIG_NUMA_EMU +void __cpuinit numa_add_cpu(int cpu) +{ + cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} +# endif /* !CONFIG_NUMA_EMU */ + +#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ + +int __cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) { + printk(KERN_WARNING + "cpu_to_node(%d): usage too early!\n", cpu); + dump_stack(); + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} +EXPORT_SYMBOL(__cpu_to_node); + +/* + * Same function as cpu_to_node() but used if called before the + * per_cpu areas are setup. + */ +int early_cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + + if (!cpu_possible(cpu)) { + printk(KERN_WARNING + "early_cpu_to_node(%d): no per_cpu area!\n", cpu); + dump_stack(); + return NUMA_NO_NODE; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} + +struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) +{ + int node = early_cpu_to_node(cpu); + struct cpumask *mask; + char buf[64]; + + if (node == NUMA_NO_NODE) { + /* early_cpu_to_node() already emits a warning and trace */ + return NULL; + } + mask = node_to_cpumask_map[node]; + if (!mask) { + pr_err("node_to_cpumask_map[%i] NULL\n", node); + dump_stack(); + return NULL; + } + + cpulist_scnprintf(buf, sizeof(buf), mask); + printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", + enable ? 
"numa_add_cpu" : "numa_remove_cpu", + cpu, node, buf); + return mask; +} + +# ifndef CONFIG_NUMA_EMU +static void __cpuinit numa_set_cpumask(int cpu, int enable) +{ + struct cpumask *mask; + + mask = debug_cpumask_set_cpu(cpu, enable); + if (!mask) + return; + + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); +} + +void __cpuinit numa_add_cpu(int cpu) +{ + numa_set_cpumask(cpu, 1); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + numa_set_cpumask(cpu, 0); +} +# endif /* !CONFIG_NUMA_EMU */ + /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ @@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node) return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); -#endif + +#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 84a3e4c9f277..bde3906420df 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); static unsigned long kva_start_pfn; static unsigned long kva_pages; + +int __cpuinit numa_cpu_node(int cpu) +{ + return apic->x86_32_numa_cpu_node(cpu); +} + /* * FLAT - support for basic PC memory model with discontig enabled, essentially * a single node with all available processors in it with a flat @@ -346,8 +352,7 @@ static void init_remap_allocator(int nid) (ulong) node_remap_end_vaddr[nid]); } -void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, - int acpi, int k8) +void __init initmem_init(void) { int nid; long kva_target_pfn; @@ -361,6 +366,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, */ get_memcfg_numa(); + numa_init_array(); kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 95ea1551eebc..9ec0f209a6a4 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -13,31 +13,30 @@ #include <linux/module.h> #include <linux/nodemask.h> #include <linux/sched.h> +#include <linux/acpi.h> #include <asm/e820.h> #include <asm/proto.h> #include <asm/dma.h> -#include <asm/numa.h> #include <asm/acpi.h> #include <asm/amd_nb.h> +#include "numa_internal.h" + struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -struct memnode memnode; +nodemask_t numa_nodes_parsed __initdata; -s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { - [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; +struct memnode memnode; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; -/* - * Map cpu index to node index - */ -DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); +static struct numa_meminfo numa_meminfo __initdata; + +static int numa_distance_cnt; +static u8 *numa_distance; /* * Given a shift value, try to populate memnodemap[] @@ -46,16 +45,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); * 0 if memnodmap[] too small (of shift too small) * -1 if node overlap or lost ram (shift too big) */ -static int __init populate_memnodemap(const struct bootnode *nodes, - int numnodes, int shift, int *nodeids) +static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift) { unsigned long addr, end; int i, res = -1; memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); - for (i = 0; i < numnodes; i++) { - addr = nodes[i].start; - end = nodes[i].end; + for (i = 0; i < mi->nr_blks; i++) { + addr = mi->blk[i].start; + end = mi->blk[i].end; if (addr >= end) continue; if ((end >> shift) >= memnodemapsize) @@ -63,12 +61,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes, do { if (memnodemap[addr >> shift] != NUMA_NO_NODE) return -1; - - if (!nodeids) - memnodemap[addr >> shift] = i; - else - memnodemap[addr >> shift] = nodeids[i]; - + memnodemap[addr >> shift] = mi->blk[i].nid; addr += (1UL << shift); } while (addr < end); res = 1; @@ -86,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void) addr = 0x8000; nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); - nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT, + nodemap_addr = memblock_find_in_range(addr, get_max_mapped(), nodemap_size, L1_CACHE_BYTES); if (nodemap_addr == MEMBLOCK_ERROR) { printk(KERN_ERR @@ -106,16 +99,15 @@ static int __init allocate_cachealigned_memnodemap(void) * The LSB of all start and end addresses in the node map is the value of the * maximum possible shift. 
*/ -static int __init extract_lsb_from_nodes(const struct bootnode *nodes, - int numnodes) +static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi) { int i, nodes_used = 0; unsigned long start, end; unsigned long bitfield = 0, memtop = 0; - for (i = 0; i < numnodes; i++) { - start = nodes[i].start; - end = nodes[i].end; + for (i = 0; i < mi->nr_blks; i++) { + start = mi->blk[i].start; + end = mi->blk[i].end; if (start >= end) continue; bitfield |= start; @@ -131,18 +123,17 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes, return i; } -int __init compute_hash_shift(struct bootnode *nodes, int numnodes, - int *nodeids) +static int __init compute_hash_shift(const struct numa_meminfo *mi) { int shift; - shift = extract_lsb_from_nodes(nodes, numnodes); + shift = extract_lsb_from_nodes(mi); if (allocate_cachealigned_memnodemap()) return -1; printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift); - if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) { + if (populate_memnodemap(mi, shift) != 1) { printk(KERN_INFO "Your memory is not aligned you need to " "rebuild your kernel with a bigger NODEMAPSIZE " "shift=%d\n", shift); @@ -188,6 +179,63 @@ static void * __init early_node_mem(int nodeid, unsigned long start, return NULL; } +static int __init numa_add_memblk_to(int nid, u64 start, u64 end, + struct numa_meminfo *mi) +{ + /* ignore zero length blks */ + if (start == end) + return 0; + + /* whine about and ignore invalid blks */ + if (start > end || nid < 0 || nid >= MAX_NUMNODES) { + pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", + nid, start, end); + return 0; + } + + if (mi->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: too many memblk ranges\n"); + return -EINVAL; + } + + mi->blk[mi->nr_blks].start = start; + mi->blk[mi->nr_blks].end = end; + mi->blk[mi->nr_blks].nid = nid; + mi->nr_blks++; + return 0; +} + +/** + * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo + * @idx: Index of memblk to remove + * @mi: numa_meminfo to remove memblk from + * + * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and + * decrementing @mi->nr_blks. + */ +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) +{ + mi->nr_blks--; + memmove(&mi->blk[idx], &mi->blk[idx + 1], + (mi->nr_blks - idx) * sizeof(mi->blk[0])); +} + +/** + * numa_add_memblk - Add one numa_memblk to numa_meminfo + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * Add a new memblk to the default numa_meminfo. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + return numa_add_memblk_to(nid, start, end, &numa_meminfo); +} + /* Initialize bootmem allocator for a node */ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) @@ -234,696 +282,386 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) node_set_online(nodeid); } -/* - * There are unfortunately some poorly designed mainboards around that - * only connect memory to a single CPU. This breaks the 1:1 cpu->node - * mapping. To avoid this fill in the mapping for all possible CPUs, - * as the number of CPUs is not known yet. We round robin the existing - * nodes. +/** + * numa_cleanup_meminfo - Cleanup a numa_meminfo + * @mi: numa_meminfo to clean up + * + * Sanitize @mi by merging and removing unncessary memblks. Also check for + * conflicts and clear unused memblks. 
+ * + * RETURNS: + * 0 on success, -errno on failure. */ -void __init numa_init_array(void) +int __init numa_cleanup_meminfo(struct numa_meminfo *mi) { - int rr, i; + const u64 low = 0; + const u64 high = (u64)max_pfn << PAGE_SHIFT; + int i, j, k; - rr = first_node(node_online_map); - for (i = 0; i < nr_cpu_ids; i++) { - if (early_cpu_to_node(i) != NUMA_NO_NODE) - continue; - numa_set_node(i, rr); - rr = next_node(rr, node_online_map); - if (rr == MAX_NUMNODES) - rr = first_node(node_online_map); - } -} - -#ifdef CONFIG_NUMA_EMU -/* Numa emulation */ -static struct bootnode nodes[MAX_NUMNODES] __initdata; -static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; -static char *cmdline __initdata; + for (i = 0; i < mi->nr_blks; i++) { + struct numa_memblk *bi = &mi->blk[i]; -void __init numa_emu_cmdline(char *str) -{ - cmdline = str; -} + /* make sure all blocks are inside the limits */ + bi->start = max(bi->start, low); + bi->end = min(bi->end, high); -static int __init setup_physnodes(unsigned long start, unsigned long end, - int acpi, int amd) -{ - int ret = 0; - int i; - - memset(physnodes, 0, sizeof(physnodes)); -#ifdef CONFIG_ACPI_NUMA - if (acpi) - acpi_get_nodes(physnodes, start, end); -#endif -#ifdef CONFIG_AMD_NUMA - if (amd) - amd_get_nodes(physnodes); -#endif - /* - * Basic sanity checking on the physical node map: there may be errors - * if the SRAT or AMD code incorrectly reported the topology or the mem= - * kernel parameter is used. - */ - for (i = 0; i < MAX_NUMNODES; i++) { - if (physnodes[i].start == physnodes[i].end) - continue; - if (physnodes[i].start > end) { - physnodes[i].end = physnodes[i].start; - continue; - } - if (physnodes[i].end < start) { - physnodes[i].start = physnodes[i].end; + /* and there's no empty block */ + if (bi->start == bi->end) { + numa_remove_memblk_from(i--, mi); continue; } - if (physnodes[i].start < start) - physnodes[i].start = start; - if (physnodes[i].end > end) - physnodes[i].end = end; - ret++; - } - - /* - * If no physical topology was detected, a single node is faked to cover - * the entire address space. - */ - if (!ret) { - physnodes[ret].start = start; - physnodes[ret].end = end; - ret = 1; - } - return ret; -} - -static void __init fake_physnodes(int acpi, int amd, int nr_nodes) -{ - int i; - - BUG_ON(acpi && amd); -#ifdef CONFIG_ACPI_NUMA - if (acpi) - acpi_fake_nodes(nodes, nr_nodes); -#endif -#ifdef CONFIG_AMD_NUMA - if (amd) - amd_fake_nodes(nodes, nr_nodes); -#endif - if (!acpi && !amd) - for (i = 0; i < nr_cpu_ids; i++) - numa_set_node(i, 0); -} - -/* - * Setups up nid to range from addr to addr + size. If the end - * boundary is greater than max_addr, then max_addr is used instead. - * The return value is 0 if there is additional memory left for - * allocation past addr and -1 otherwise. addr is adjusted to be at - * the end of the node. - */ -static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr) -{ - int ret = 0; - nodes[nid].start = *addr; - *addr += size; - if (*addr >= max_addr) { - *addr = max_addr; - ret = -1; - } - nodes[nid].end = *addr; - node_set(nid, node_possible_map); - printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, - nodes[nid].start, nodes[nid].end, - (nodes[nid].end - nodes[nid].start) >> 20); - return ret; -} - -/* - * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr - * to max_addr. The return value is the number of nodes allocated. 
- */ -static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) -{ - nodemask_t physnode_mask = NODE_MASK_NONE; - u64 size; - int big; - int ret = 0; - int i; - - if (nr_nodes <= 0) - return -1; - if (nr_nodes > MAX_NUMNODES) { - pr_info("numa=fake=%d too large, reducing to %d\n", - nr_nodes, MAX_NUMNODES); - nr_nodes = MAX_NUMNODES; - } - - size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; - /* - * Calculate the number of big nodes that can be allocated as a result - * of consolidating the remainder. - */ - big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / - FAKE_NODE_MIN_SIZE; - - size &= FAKE_NODE_MIN_HASH_MASK; - if (!size) { - pr_err("Not enough memory for each node. " - "NUMA emulation disabled.\n"); - return -1; - } - for (i = 0; i < MAX_NUMNODES; i++) - if (physnodes[i].start != physnodes[i].end) - node_set(i, physnode_mask); - - /* - * Continue to fill physical nodes with fake nodes until there is no - * memory left on any of them. - */ - while (nodes_weight(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 end = physnodes[i].start + size; - u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); - - if (ret < big) - end += FAKE_NODE_MIN_SIZE; + for (j = i + 1; j < mi->nr_blks; j++) { + struct numa_memblk *bj = &mi->blk[j]; + unsigned long start, end; /* - * Continue to add memory to this fake node if its - * non-reserved memory is less than the per-node size. + * See whether there are overlapping blocks. Whine + * about but allow overlaps of the same nid. They + * will be merged below. */ - while (end - physnodes[i].start - - memblock_x86_hole_size(physnodes[i].start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > physnodes[i].end) { - end = physnodes[i].end; - break; + if (bi->end > bj->start && bi->start < bj->end) { + if (bi->nid != bj->nid) { + pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->nid, bj->start, bj->end); + return -EINVAL; } + pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->start, bj->end); } /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. + * Join together blocks on the same node, holes + * between which don't overlap with memory on other + * nodes. */ - if (end < dma32_end && dma32_end - end - - memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; - - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. - */ - if (physnodes[i].end - end - - memblock_x86_hole_size(end, physnodes[i].end) < size) - end = physnodes[i].end; - - /* - * Avoid allocating more nodes than requested, which can - * happen as a result of rounding down each node's size - * to FAKE_NODE_MIN_SIZE. 
- */ - if (nodes_weight(physnode_mask) + ret >= nr_nodes) - end = physnodes[i].end; - - if (setup_node_range(ret++, &physnodes[i].start, - end - physnodes[i].start, - physnodes[i].end) < 0) - node_clear(i, physnode_mask); + if (bi->nid != bj->nid) + continue; + start = max(min(bi->start, bj->start), low); + end = min(max(bi->end, bj->end), high); + for (k = 0; k < mi->nr_blks; k++) { + struct numa_memblk *bk = &mi->blk[k]; + + if (bi->nid == bk->nid) + continue; + if (start < bk->end && end > bk->start) + break; + } + if (k < mi->nr_blks) + continue; + printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", + bi->nid, bi->start, bi->end, bj->start, bj->end, + start, end); + bi->start = start; + bi->end = end; + numa_remove_memblk_from(j--, mi); } } - return ret; -} -/* - * Returns the end address of a node so that there is at least `size' amount of - * non-reserved memory or `max_addr' is reached. - */ -static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) -{ - u64 end = start + size; - - while (end - start - memblock_x86_hole_size(start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > max_addr) { - end = max_addr; - break; - } + for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { + mi->blk[i].start = mi->blk[i].end = 0; + mi->blk[i].nid = NUMA_NO_NODE; } - return end; + + return 0; } /* - * Sets up fake nodes of `size' interleaved over physical nodes ranging from - * `addr' to `max_addr'. The return value is the number of nodes allocated. + * Set nodes, which have memory in @mi, in *@nodemask. */ -static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) +static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, + const struct numa_meminfo *mi) { - nodemask_t physnode_mask = NODE_MASK_NONE; - u64 min_size; - int ret = 0; int i; - if (!size) - return -1; - /* - * The limit on emulated nodes is MAX_NUMNODES, so the size per node is - * increased accordingly if the requested size is too small. This - * creates a uniform distribution of node sizes across the entire - * machine (but not necessarily over physical nodes). - */ - min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / - MAX_NUMNODES; - min_size = max(min_size, FAKE_NODE_MIN_SIZE); - if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) - min_size = (min_size + FAKE_NODE_MIN_SIZE) & - FAKE_NODE_MIN_HASH_MASK; - if (size < min_size) { - pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", - size >> 20, min_size >> 20); - size = min_size; - } - size &= FAKE_NODE_MIN_HASH_MASK; - - for (i = 0; i < MAX_NUMNODES; i++) - if (physnodes[i].start != physnodes[i].end) - node_set(i, physnode_mask); - /* - * Fill physical nodes with fake nodes of size until there is no memory - * left on any of them. - */ - while (nodes_weight(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; - u64 end; - - end = find_end_of_node(physnodes[i].start, - physnodes[i].end, size); - /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. 
- */ - if (end < dma32_end && dma32_end - end - - memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; + for (i = 0; i < ARRAY_SIZE(mi->blk); i++) + if (mi->blk[i].start != mi->blk[i].end && + mi->blk[i].nid != NUMA_NO_NODE) + node_set(mi->blk[i].nid, *nodemask); +} - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. - */ - if (physnodes[i].end - end - - memblock_x86_hole_size(end, physnodes[i].end) < size) - end = physnodes[i].end; +/** + * numa_reset_distance - Reset NUMA distance table + * + * The current table is freed. The next numa_set_distance() call will + * create a new one. + */ +void __init numa_reset_distance(void) +{ + size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - /* - * Setup the fake node that will be allocated as bootmem - * later. If setup_node_range() returns non-zero, there - * is no more memory available on this physical node. - */ - if (setup_node_range(ret++, &physnodes[i].start, - end - physnodes[i].start, - physnodes[i].end) < 0) - node_clear(i, physnode_mask); - } - } - return ret; + /* numa_distance could be 1LU marking allocation failure, test cnt */ + if (numa_distance_cnt) + memblock_x86_free_range(__pa(numa_distance), + __pa(numa_distance) + size); + numa_distance_cnt = 0; + numa_distance = NULL; /* enable table creation */ } -/* - * Sets up the system RAM area from start_pfn to last_pfn according to the - * numa=fake command-line option. - */ -static int __init numa_emulation(unsigned long start_pfn, - unsigned long last_pfn, int acpi, int amd) +static int __init numa_alloc_distance(void) { - u64 addr = start_pfn << PAGE_SHIFT; - u64 max_addr = last_pfn << PAGE_SHIFT; - int num_nodes; - int i; + nodemask_t nodes_parsed; + size_t size; + int i, j, cnt = 0; + u64 phys; - /* - * If the numa=fake command-line contains a 'M' or 'G', it represents - * the fixed node size. Otherwise, if it is just a single number N, - * split the system RAM into N fake nodes. - */ - if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) { - u64 size; + /* size the new table and allocate it */ + nodes_parsed = numa_nodes_parsed; + numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); - size = memparse(cmdline, &cmdline); - num_nodes = split_nodes_size_interleave(addr, max_addr, size); - } else { - unsigned long n; + for_each_node_mask(i, nodes_parsed) + cnt = i; + cnt++; + size = cnt * cnt * sizeof(numa_distance[0]); - n = simple_strtoul(cmdline, NULL, 0); - num_nodes = split_nodes_interleave(addr, max_addr, n); + phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate distance table!\n"); + /* don't retry until explicitly reset */ + numa_distance = (void *)1LU; + return -ENOMEM; } + memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); - if (num_nodes < 0) - return num_nodes; - memnode_shift = compute_hash_shift(nodes, num_nodes, NULL); - if (memnode_shift < 0) { - memnode_shift = 0; - printk(KERN_ERR "No NUMA hash function found. NUMA emulation " - "disabled.\n"); - return -1; - } + numa_distance = __va(phys); + numa_distance_cnt = cnt; + + /* fill with the default distances */ + for (i = 0; i < cnt; i++) + for (j = 0; j < cnt; j++) + numa_distance[i * cnt + j] = i == j ? 
+ LOCAL_DISTANCE : REMOTE_DISTANCE; + printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); - /* - * We need to vacate all active ranges that may have been registered for - * the e820 memory map. - */ - remove_all_active_ranges(); - for_each_node_mask(i, node_possible_map) { - memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, - nodes[i].end >> PAGE_SHIFT); - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } - setup_physnodes(addr, max_addr, acpi, amd); - fake_physnodes(acpi, amd, num_nodes); - numa_init_array(); return 0; } -#endif /* CONFIG_NUMA_EMU */ -void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, - int acpi, int amd) +/** + * numa_set_distance - Set NUMA distance from one NUMA to another + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. If distance table + * doesn't exist, one which is large enough to accomodate all the currently + * known nodes will be created. + * + * If such table cannot be allocated, a warning is printed and further + * calls are ignored until the distance table is reset with + * numa_reset_distance(). + * + * If @from or @to is higher than the highest known node at the time of + * table creation or @distance doesn't make sense, the call is ignored. + * This is to allow simplification of specific NUMA config implementations. + */ +void __init numa_set_distance(int from, int to, int distance) { - int i; - - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - -#ifdef CONFIG_NUMA_EMU - setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, - acpi, amd); - if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) + if (!numa_distance && numa_alloc_distance() < 0) return; - setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, - acpi, amd); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif -#ifdef CONFIG_ACPI_NUMA - if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT, - last_pfn << PAGE_SHIFT)) + if (from >= numa_distance_cnt || to >= numa_distance_cnt) { + printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", + from, to, distance); return; - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif + } -#ifdef CONFIG_AMD_NUMA - if (!numa_off && amd && !amd_scan_nodes()) + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); return; - nodes_clear(node_possible_map); - nodes_clear(node_online_map); -#endif - printk(KERN_INFO "%s\n", - numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); + } - printk(KERN_INFO "Faking a node at %016lx-%016lx\n", - start_pfn << PAGE_SHIFT, - last_pfn << PAGE_SHIFT); - /* setup dummy node covering all memory */ - memnode_shift = 63; - memnodemap = memnode.embedded_map; - memnodemap[0] = 0; - node_set_online(0); - node_set(0, node_possible_map); - for (i = 0; i < nr_cpu_ids; i++) - numa_set_node(i, 0); - memblock_x86_register_active_regions(0, start_pfn, last_pfn); - setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); + numa_distance[from * numa_distance_cnt + to] = distance; } -unsigned long __init numa_free_all_bootmem(void) +int __node_distance(int from, int to) { - unsigned long pages = 0; - int i; + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); - for_each_online_node(i) - pages += free_all_bootmem_node(NODE_DATA(i)); +/* + * Sanity check to catch more bad NUMA configurations (they are amazingly + * common). Make sure the nodes cover all memory. + */ +static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) +{ + unsigned long numaram, e820ram; + int i; - pages += free_all_memory_core_early(MAX_NUMNODES); + numaram = 0; + for (i = 0; i < mi->nr_blks; i++) { + unsigned long s = mi->blk[i].start >> PAGE_SHIFT; + unsigned long e = mi->blk[i].end >> PAGE_SHIFT; + numaram += e - s; + numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); + if ((long)numaram < 0) + numaram = 0; + } - return pages; + e820ram = max_pfn - (memblock_x86_hole_size(0, + max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); + /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ + if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { + printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", + (numaram << PAGE_SHIFT) >> 20, + (e820ram << PAGE_SHIFT) >> 20); + return false; + } + return true; } -#ifdef CONFIG_NUMA - -static __init int find_near_online_node(int node) +static int __init numa_register_memblks(struct numa_meminfo *mi) { - int n, val; - int min_val = INT_MAX; - int best_node = -1; + int i, nid; - for_each_online_node(n) { - val = node_distance(node, n); + /* Account for nodes with cpus and no memory */ + node_possible_map = numa_nodes_parsed; + numa_nodemask_from_meminfo(&node_possible_map, mi); + if (WARN_ON(nodes_empty(node_possible_map))) + return -EINVAL; + + memnode_shift = compute_hash_shift(mi); + if (memnode_shift < 0) { + printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); + return -EINVAL; + } - if (val < min_val) { - min_val = val; - best_node = n; + for (i = 0; i < mi->nr_blks; i++) + memblock_x86_register_active_regions(mi->blk[i].nid, + mi->blk[i].start >> PAGE_SHIFT, + mi->blk[i].end >> PAGE_SHIFT); + + /* for out of order entries */ + sort_node_map(); + if (!numa_meminfo_cover_memory(mi)) + return -EINVAL; + + /* Finally register nodes. */ + for_each_node_mask(nid, node_possible_map) { + u64 start = (u64)max_pfn << PAGE_SHIFT; + u64 end = 0; + + for (i = 0; i < mi->nr_blks; i++) { + if (nid != mi->blk[i].nid) + continue; + start = min(mi->blk[i].start, start); + end = max(mi->blk[i].end, end); } + + if (start < end) + setup_node_bootmem(nid, start, end); } - return best_node; + return 0; } -/* - * Setup early cpu_to_node. 
+/** + * dummy_numma_init - Fallback dummy NUMA init * - * Populate cpu_to_node[] only if x86_cpu_to_apicid[], - * and apicid_to_node[] tables have valid entries for a CPU. - * This means we skip cpu_to_node[] initialisation for NUMA - * emulation and faking node case (when running a kernel compiled - * for NUMA on a non NUMA box), which is OK as cpu_to_node[] - * is already initialized in a round robin manner at numa_init_array, - * prior to this call, and this initialization is good enough - * for the fake NUMA cases. + * Used if there's no underlying NUMA architecture, NUMA initialization + * fails, or NUMA is disabled on the command line. * - * Called before the per_cpu areas are setup. + * Must online at least one node and add memory blocks that cover all + * allowed memory. This function must not fail. */ -void __init init_cpu_to_node(void) +static int __init dummy_numa_init(void) { - int cpu; - u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); - - BUG_ON(cpu_to_apicid == NULL); + printk(KERN_INFO "%s\n", + numa_off ? "NUMA turned off" : "No NUMA configuration found"); + printk(KERN_INFO "Faking a node at %016lx-%016lx\n", + 0LU, max_pfn << PAGE_SHIFT); - for_each_possible_cpu(cpu) { - int node; - u16 apicid = cpu_to_apicid[cpu]; + node_set(0, numa_nodes_parsed); + numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); - if (apicid == BAD_APICID) - continue; - node = apicid_to_node[apicid]; - if (node == NUMA_NO_NODE) - continue; - if (!node_online(node)) - node = find_near_online_node(node); - numa_set_node(cpu, node); - } + return 0; } -#endif - -void __cpuinit numa_set_node(int cpu, int node) +static int __init numa_init(int (*init_func)(void)) { - int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); - - /* early setting, no percpu area yet */ - if (cpu_to_node_map) { - cpu_to_node_map[cpu] = node; - return; - } - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { - printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); - dump_stack(); - return; - } -#endif - per_cpu(x86_cpu_to_node_map, cpu) = node; + int i; + int ret; - if (node != NUMA_NO_NODE) - set_cpu_numa_node(cpu, node); -} + for (i = 0; i < MAX_LOCAL_APIC; i++) + set_apicid_to_node(i, NUMA_NO_NODE); -void __cpuinit numa_clear_node(int cpu) -{ - numa_set_node(cpu, NUMA_NO_NODE); -} + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + remove_all_active_ranges(); + numa_reset_distance(); -#ifndef CONFIG_DEBUG_PER_CPU_MAPS + ret = init_func(); + if (ret < 0) + return ret; + ret = numa_cleanup_meminfo(&numa_meminfo); + if (ret < 0) + return ret; -#ifndef CONFIG_NUMA_EMU -void __cpuinit numa_add_cpu(int cpu) -{ - cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} + numa_emulation(&numa_meminfo, numa_distance_cnt); -void __cpuinit numa_remove_cpu(int cpu) -{ - cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} -#else -void __cpuinit numa_add_cpu(int cpu) -{ - unsigned long addr; - u16 apicid; - int physnid; - int nid = NUMA_NO_NODE; + ret = numa_register_memblks(&numa_meminfo); + if (ret < 0) + return ret; - apicid = early_per_cpu(x86_cpu_to_apicid, cpu); - if (apicid != BAD_APICID) - nid = apicid_to_node[apicid]; - if (nid == NUMA_NO_NODE) - nid = early_cpu_to_node(cpu); - BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); - - /* - * Use the starting address of the emulated node to find which physical - * node it is allocated on. 
- */ - addr = node_start_pfn(nid) << PAGE_SHIFT; - for (physnid = 0; physnid < MAX_NUMNODES; physnid++) - if (addr >= physnodes[physnid].start && - addr < physnodes[physnid].end) - break; + for (i = 0; i < nr_cpu_ids; i++) { + int nid = early_cpu_to_node(i); - /* - * Map the cpu to each emulated node that is allocated on the physical - * node of the cpu's apic id. - */ - for_each_online_node(nid) { - addr = node_start_pfn(nid) << PAGE_SHIFT; - if (addr >= physnodes[physnid].start && - addr < physnodes[physnid].end) - cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); + if (nid == NUMA_NO_NODE) + continue; + if (!node_online(nid)) + numa_clear_node(i); } + numa_init_array(); + return 0; } -void __cpuinit numa_remove_cpu(int cpu) +void __init initmem_init(void) { - int i; + int ret; - for_each_online_node(i) - cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); -} -#endif /* !CONFIG_NUMA_EMU */ - -#else /* CONFIG_DEBUG_PER_CPU_MAPS */ -static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) -{ - int node = early_cpu_to_node(cpu); - struct cpumask *mask; - char buf[64]; - - mask = node_to_cpumask_map[node]; - if (!mask) { - pr_err("node_to_cpumask_map[%i] NULL\n", node); - dump_stack(); - return NULL; + if (!numa_off) { +#ifdef CONFIG_ACPI_NUMA + ret = numa_init(x86_acpi_numa_init); + if (!ret) + return; +#endif +#ifdef CONFIG_AMD_NUMA + ret = numa_init(amd_numa_init); + if (!ret) + return; +#endif } - cpulist_scnprintf(buf, sizeof(buf), mask); - printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", - enable ? "numa_add_cpu" : "numa_remove_cpu", - cpu, node, buf); - return mask; + numa_init(dummy_numa_init); } -/* - * --------- debug versions of the numa functions --------- - */ -#ifndef CONFIG_NUMA_EMU -static void __cpuinit numa_set_cpumask(int cpu, int enable) -{ - struct cpumask *mask; - - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); -} -#else -static void __cpuinit numa_set_cpumask(int cpu, int enable) +unsigned long __init numa_free_all_bootmem(void) { - int node = early_cpu_to_node(cpu); - struct cpumask *mask; + unsigned long pages = 0; int i; - for_each_online_node(i) { - unsigned long addr; - - addr = node_start_pfn(i) << PAGE_SHIFT; - if (addr < physnodes[node].start || - addr >= physnodes[node].end) - continue; - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); - } -} -#endif /* CONFIG_NUMA_EMU */ + for_each_online_node(i) + pages += free_all_bootmem_node(NODE_DATA(i)); -void __cpuinit numa_add_cpu(int cpu) -{ - numa_set_cpumask(cpu, 1); -} + pages += free_all_memory_core_early(MAX_NUMNODES); -void __cpuinit numa_remove_cpu(int cpu) -{ - numa_set_cpumask(cpu, 0); + return pages; } -int __cpu_to_node(int cpu) +int __cpuinit numa_cpu_node(int cpu) { - if (early_per_cpu_ptr(x86_cpu_to_node_map)) { - printk(KERN_WARNING - "cpu_to_node(%d): usage too early!\n", cpu); - dump_stack(); - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - } - return per_cpu(x86_cpu_to_node_map, cpu); -} -EXPORT_SYMBOL(__cpu_to_node); + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); -/* - * Same function as cpu_to_node() but used if called before the - * per_cpu areas are setup. 
- */ -int early_cpu_to_node(int cpu) -{ - if (early_per_cpu_ptr(x86_cpu_to_node_map)) - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - - if (!cpu_possible(cpu)) { - printk(KERN_WARNING - "early_cpu_to_node(%d): no per_cpu area!\n", cpu); - dump_stack(); - return NUMA_NO_NODE; - } - return per_cpu(x86_cpu_to_node_map, cpu); + if (apicid != BAD_APICID) + return __apicid_to_node[apicid]; + return NUMA_NO_NODE; } - -/* - * --------- end of debug versions of the numa functions --------- - */ - -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c new file mode 100644 index 000000000000..ad091e4cff17 --- /dev/null +++ b/arch/x86/mm/numa_emulation.c @@ -0,0 +1,494 @@ +/* + * NUMA emulation + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/topology.h> +#include <linux/memblock.h> +#include <asm/dma.h> + +#include "numa_internal.h" + +static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; +static char *emu_cmdline __initdata; + +void __init numa_emu_cmdline(char *str) +{ + emu_cmdline = str; +} + +static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) +{ + int i; + + for (i = 0; i < mi->nr_blks; i++) + if (mi->blk[i].nid == nid) + return i; + return -ENOENT; +} + +/* + * Sets up nid to range from @start to @end. The return value is -errno if + * something went wrong, 0 otherwise. + */ +static int __init emu_setup_memblk(struct numa_meminfo *ei, + struct numa_meminfo *pi, + int nid, int phys_blk, u64 size) +{ + struct numa_memblk *eb = &ei->blk[ei->nr_blks]; + struct numa_memblk *pb = &pi->blk[phys_blk]; + + if (ei->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: Too many emulated memblks, failing emulation\n"); + return -EINVAL; + } + + ei->nr_blks++; + eb->start = pb->start; + eb->end = pb->start + size; + eb->nid = nid; + + if (emu_nid_to_phys[nid] == NUMA_NO_NODE) + emu_nid_to_phys[nid] = pb->nid; + + pb->start += size; + if (pb->start >= pb->end) { + WARN_ON_ONCE(pb->start > pb->end); + numa_remove_memblk_from(phys_blk, pi); + } + + printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, + eb->start, eb->end, (eb->end - eb->start) >> 20); + return 0; +} + +/* + * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr + * to max_addr. The return value is the number of nodes allocated. + */ +static int __init split_nodes_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, + u64 addr, u64 max_addr, int nr_nodes) +{ + nodemask_t physnode_mask = NODE_MASK_NONE; + u64 size; + int big; + int nid = 0; + int i, ret; + + if (nr_nodes <= 0) + return -1; + if (nr_nodes > MAX_NUMNODES) { + pr_info("numa=fake=%d too large, reducing to %d\n", + nr_nodes, MAX_NUMNODES); + nr_nodes = MAX_NUMNODES; + } + + size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; + /* + * Calculate the number of big nodes that can be allocated as a result + * of consolidating the remainder. + */ + big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / + FAKE_NODE_MIN_SIZE; + + size &= FAKE_NODE_MIN_HASH_MASK; + if (!size) { + pr_err("Not enough memory for each node. " + "NUMA emulation disabled.\n"); + return -1; + } + + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); + + /* + * Continue to fill physical nodes with fake nodes until there is no + * memory left on any of them. 
+ */ + while (nodes_weight(physnode_mask)) { + for_each_node_mask(i, physnode_mask) { + u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); + u64 start, limit, end; + int phys_blk; + + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + end = start + size; + + if (nid < big) + end += FAKE_NODE_MIN_SIZE; + + /* + * Continue to add memory to this fake node if its + * non-reserved memory is less than the per-node size. + */ + while (end - start - + memblock_x86_hole_size(start, end) < size) { + end += FAKE_NODE_MIN_SIZE; + if (end > limit) { + end = limit; + break; + } + } + + /* + * If there won't be at least FAKE_NODE_MIN_SIZE of + * non-reserved memory in ZONE_DMA32 for the next node, + * this one must extend to the boundary. + */ + if (end < dma32_end && dma32_end - end - + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + end = dma32_end; + + /* + * If there won't be enough non-reserved memory for the + * next node, this one must extend to the end of the + * physical node. + */ + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; + + ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, + phys_blk, + min(end, limit) - start); + if (ret < 0) + return ret; + } + } + return 0; +} + +/* + * Returns the end address of a node so that there is at least `size' amount of + * non-reserved memory or `max_addr' is reached. + */ +static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) +{ + u64 end = start + size; + + while (end - start - memblock_x86_hole_size(start, end) < size) { + end += FAKE_NODE_MIN_SIZE; + if (end > max_addr) { + end = max_addr; + break; + } + } + return end; +} + +/* + * Sets up fake nodes of `size' interleaved over physical nodes ranging from + * `addr' to `max_addr'. The return value is the number of nodes allocated. + */ +static int __init split_nodes_size_interleave(struct numa_meminfo *ei, + struct numa_meminfo *pi, + u64 addr, u64 max_addr, u64 size) +{ + nodemask_t physnode_mask = NODE_MASK_NONE; + u64 min_size; + int nid = 0; + int i, ret; + + if (!size) + return -1; + /* + * The limit on emulated nodes is MAX_NUMNODES, so the size per node is + * increased accordingly if the requested size is too small. This + * creates a uniform distribution of node sizes across the entire + * machine (but not necessarily over physical nodes). + */ + min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / + MAX_NUMNODES; + min_size = max(min_size, FAKE_NODE_MIN_SIZE); + if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) + min_size = (min_size + FAKE_NODE_MIN_SIZE) & + FAKE_NODE_MIN_HASH_MASK; + if (size < min_size) { + pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", + size >> 20, min_size >> 20); + size = min_size; + } + size &= FAKE_NODE_MIN_HASH_MASK; + + for (i = 0; i < pi->nr_blks; i++) + node_set(pi->blk[i].nid, physnode_mask); + + /* + * Fill physical nodes with fake nodes of size until there is no memory + * left on any of them. 
+ */ + while (nodes_weight(physnode_mask)) { + for_each_node_mask(i, physnode_mask) { + u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; + u64 start, limit, end; + int phys_blk; + + phys_blk = emu_find_memblk_by_nid(i, pi); + if (phys_blk < 0) { + node_clear(i, physnode_mask); + continue; + } + start = pi->blk[phys_blk].start; + limit = pi->blk[phys_blk].end; + + end = find_end_of_node(start, limit, size); + /* + * If there won't be at least FAKE_NODE_MIN_SIZE of + * non-reserved memory in ZONE_DMA32 for the next node, + * this one must extend to the boundary. + */ + if (end < dma32_end && dma32_end - end - + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + end = dma32_end; + + /* + * If there won't be enough non-reserved memory for the + * next node, this one must extend to the end of the + * physical node. + */ + if (limit - end - + memblock_x86_hole_size(end, limit) < size) + end = limit; + + ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, + phys_blk, + min(end, limit) - start); + if (ret < 0) + return ret; + } + } + return 0; +} + +/** + * numa_emulation - Emulate NUMA nodes + * @numa_meminfo: NUMA configuration to massage + * @numa_dist_cnt: The size of the physical NUMA distance table + * + * Emulate NUMA nodes according to the numa=fake kernel parameter. + * @numa_meminfo contains the physical memory configuration and is modified + * to reflect the emulated configuration on success. @numa_dist_cnt is + * used to determine the size of the physical distance table. + * + * On success, the following modifications are made. + * + * - @numa_meminfo is updated to reflect the emulated nodes. + * + * - __apicid_to_node[] is updated such that APIC IDs are mapped to the + * emulated nodes. + * + * - NUMA distance table is rebuilt to represent distances between emulated + * nodes. The distances are determined considering how emulated nodes + * are mapped to physical nodes and match the actual distances. + * + * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical + * nodes. This is used by numa_add_cpu() and numa_remove_cpu(). + * + * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with + * identity mapping and no other modification is made. + */ +void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) +{ + static struct numa_meminfo ei __initdata; + static struct numa_meminfo pi __initdata; + const u64 max_addr = max_pfn << PAGE_SHIFT; + u8 *phys_dist = NULL; + size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); + int max_emu_nid, dfl_phys_nid; + int i, j, ret; + + if (!emu_cmdline) + goto no_emu; + + memset(&ei, 0, sizeof(ei)); + pi = *numa_meminfo; + + for (i = 0; i < MAX_NUMNODES; i++) + emu_nid_to_phys[i] = NUMA_NO_NODE; + + /* + * If the numa=fake command-line contains a 'M' or 'G', it represents + * the fixed node size. Otherwise, if it is just a single number N, + * split the system RAM into N fake nodes. 
+ */ + if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { + u64 size; + + size = memparse(emu_cmdline, &emu_cmdline); + ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); + } else { + unsigned long n; + + n = simple_strtoul(emu_cmdline, NULL, 0); + ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); + } + + if (ret < 0) + goto no_emu; + + if (numa_cleanup_meminfo(&ei) < 0) { + pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); + goto no_emu; + } + + /* copy the physical distance table */ + if (numa_dist_cnt) { + u64 phys; + + phys = memblock_find_in_range(0, + (u64)max_pfn_mapped << PAGE_SHIFT, + phys_size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); + goto no_emu; + } + memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); + phys_dist = __va(phys); + + for (i = 0; i < numa_dist_cnt; i++) + for (j = 0; j < numa_dist_cnt; j++) + phys_dist[i * numa_dist_cnt + j] = + node_distance(i, j); + } + + /* + * Determine the max emulated nid and the default phys nid to use + * for unmapped nodes. + */ + max_emu_nid = 0; + dfl_phys_nid = NUMA_NO_NODE; + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { + if (emu_nid_to_phys[i] != NUMA_NO_NODE) { + max_emu_nid = i; + if (dfl_phys_nid == NUMA_NO_NODE) + dfl_phys_nid = emu_nid_to_phys[i]; + } + } + if (dfl_phys_nid == NUMA_NO_NODE) { + pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n"); + goto no_emu; + } + + /* commit */ + *numa_meminfo = ei; + + /* + * Transform __apicid_to_node table to use emulated nids by + * reverse-mapping phys_nid. The maps should always exist but fall + * back to zero just in case. + */ + for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { + if (__apicid_to_node[i] == NUMA_NO_NODE) + continue; + for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) + if (__apicid_to_node[i] == emu_nid_to_phys[j]) + break; + __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0; + } + + /* make sure all emulated nodes are mapped to a physical node */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + if (emu_nid_to_phys[i] == NUMA_NO_NODE) + emu_nid_to_phys[i] = dfl_phys_nid; + + /* transform distance table */ + numa_reset_distance(); + for (i = 0; i < max_emu_nid + 1; i++) { + for (j = 0; j < max_emu_nid + 1; j++) { + int physi = emu_nid_to_phys[i]; + int physj = emu_nid_to_phys[j]; + int dist; + + if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) + dist = physi == physj ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + else + dist = phys_dist[physi * numa_dist_cnt + physj]; + + numa_set_distance(i, j, dist); + } + } + + /* free the copied physical distance table */ + if (phys_dist) + memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); + return; + +no_emu: + /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ + for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + emu_nid_to_phys[i] = i; +} + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +void __cpuinit numa_add_cpu(int cpu) +{ + int physnid, nid; + + nid = early_cpu_to_node(cpu); + BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); + + physnid = emu_nid_to_phys[nid]; + + /* + * Map the cpu to each emulated node that is allocated on the physical + * node of the cpu's apic id. 
+ */ + for_each_online_node(nid) + if (emu_nid_to_phys[nid] == physnid) + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + int i; + + for_each_online_node(i) + cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); +} +#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ +static void __cpuinit numa_set_cpumask(int cpu, int enable) +{ + struct cpumask *mask; + int nid, physnid, i; + + nid = early_cpu_to_node(cpu); + if (nid == NUMA_NO_NODE) { + /* early_cpu_to_node() already emits a warning and trace */ + return; + } + + physnid = emu_nid_to_phys[nid]; + + for_each_online_node(i) { + if (emu_nid_to_phys[nid] != physnid) + continue; + + mask = debug_cpumask_set_cpu(cpu, enable); + if (!mask) + return; + + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); + } +} + +void __cpuinit numa_add_cpu(int cpu) +{ + numa_set_cpumask(cpu, 1); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + numa_set_cpumask(cpu, 0); +} +#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h new file mode 100644 index 000000000000..ef2d97377d7c --- /dev/null +++ b/arch/x86/mm/numa_internal.h @@ -0,0 +1,31 @@ +#ifndef __X86_MM_NUMA_INTERNAL_H +#define __X86_MM_NUMA_INTERNAL_H + +#include <linux/types.h> +#include <asm/numa.h> + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); +int __init numa_cleanup_meminfo(struct numa_meminfo *mi); +void __init numa_reset_distance(void); + +#ifdef CONFIG_NUMA_EMU +void __init numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt); +#else +static inline void numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt) +{ } +#endif + +#endif /* __X86_MM_NUMA_INTERNAL_H */ diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index d343b3c81f3c..90825f2eb0f4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM]; void update_page_count(int level, unsigned long pages) { - unsigned long flags; - /* Protect against CPA */ - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); direct_pages_count[level] += pages; - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } static void split_page_count(int level) @@ -394,7 +392,7 @@ static int try_preserve_large_page(pte_t *kpte, unsigned long address, struct cpa_data *cpa) { - unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; + unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; pte_t new_pte, old_pte, *tmp; pgprot_t old_prot, new_prot, req_prot; int i, do_split = 1; @@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, if (cpa->force_split) return 1; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up already: @@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, } out_unlock: - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); return do_split; } static int split_large_page(pte_t *kpte, unsigned long address) { - unsigned long flags, pfn, pfninc = 1; + unsigned long pfn, pfninc = 1; unsigned int i, level; pte_t *pbase, *tmp; pgprot_t ref_prot; @@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!base) return 
-ENOMEM; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up for us already: @@ -591,7 +589,7 @@ out_unlock: */ if (base) __free_page(base); - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); return 0; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 500242d3c96d..0113d19c8aa6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) static void pgd_dtor(pgd_t *pgd) { - unsigned long flags; /* can be called from interrupt context */ - if (SHARED_KERNEL_PMD) return; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); pgd_list_del(pgd); - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } /* @@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; pmd_t *pmds[PREALLOCATED_PMDS]; - unsigned long flags; pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); @@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) * respect to anything walking the pgd_list, so that they * never see a partially populated pgd. */ - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); pgd_ctor(mm, pgd); pgd_prepopulate_pmd(mm, pgd, pmds); - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); return pgd; diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index ae96e7b8051d..48651c6f657d 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c @@ -57,7 +57,7 @@ struct node_memory_chunk_s { static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; static int __initdata num_memory_chunks; /* total number of memory chunks */ -static u8 __initdata apicid_to_pxm[MAX_APICID]; +static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC]; int acpi_numa __initdata; @@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void) printk(KERN_DEBUG "Number of memory chunks in system = %d\n", num_memory_chunks); - for (i = 0; i < MAX_APICID; i++) - apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); + for (i = 0; i < MAX_LOCAL_APIC; i++) + set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i])); for (j = 0; j < num_memory_chunks; j++){ struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 603d285d1daa..8e9d3394f6d4 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -26,88 +26,34 @@ int acpi_numa __initdata; -static struct acpi_table_slit *acpi_slit; - -static nodemask_t nodes_parsed __initdata; -static nodemask_t cpu_nodes_parsed __initdata; -static struct bootnode nodes[MAX_NUMNODES] __initdata; static struct bootnode nodes_add[MAX_NUMNODES]; -static int num_node_memblks __initdata; -static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata; -static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata; - static __init int setup_node(int pxm) { return acpi_map_pxm_to_node(pxm); } -static __init int conflicting_memblks(unsigned long start, unsigned long end) -{ - int i; - for (i = 0; i < num_node_memblks; i++) { - struct bootnode *nd = &node_memblk_range[i]; - if (nd->start == nd->end) - continue; - if (nd->end > start && nd->start < end) - return memblk_nodeid[i]; - if (nd->end == end && nd->start == start) - return memblk_nodeid[i]; - } - return -1; -} - -static __init void cutoff_node(int i, unsigned long start, unsigned long end) -{ - struct bootnode *nd = &nodes[i]; - - if (nd->start < start) { - nd->start = start; - if (nd->end < nd->start) - nd->start = nd->end; - } - 
if (nd->end > end) { - nd->end = end; - if (nd->start > nd->end) - nd->start = nd->end; - } -} - static __init void bad_srat(void) { - int i; printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; - for (i = 0; i < MAX_LOCAL_APIC; i++) - apicid_to_node[i] = NUMA_NO_NODE; - for (i = 0; i < MAX_NUMNODES; i++) { - nodes[i].start = nodes[i].end = 0; - nodes_add[i].start = nodes_add[i].end = 0; - } - remove_all_active_ranges(); + memset(nodes_add, 0, sizeof(nodes_add)); } static __init inline int srat_disabled(void) { - return numa_off || acpi_numa < 0; + return acpi_numa < 0; } /* Callback for SLIT parsing */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { - unsigned length; - unsigned long phys; - - length = slit->header.length; - phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length, - PAGE_SIZE); - - if (phys == MEMBLOCK_ERROR) - panic(" Can not save slit!\n"); + int i, j; - acpi_slit = __va(phys); - memcpy(acpi_slit, slit, length); - memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT"); + for (i = 0; i < slit->locality_count; i++) + for (j = 0; j < slit->locality_count; j++) + numa_set_distance(pxm_to_node(i), pxm_to_node(j), + slit->entry[slit->locality_count * i + j]); } /* Callback for Proximity Domain -> x2APIC mapping */ @@ -138,8 +84,8 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); return; } - apicid_to_node[apic_id] = node; - node_set(node, cpu_nodes_parsed); + set_apicid_to_node(apic_id, node); + node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node); @@ -178,8 +124,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) return; } - apicid_to_node[apic_id] = node; - node_set(node, cpu_nodes_parsed); + set_apicid_to_node(apic_id, node); + node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node); @@ -241,7 +187,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end) } if (changed) { - node_set(node, cpu_nodes_parsed); + node_set(node, numa_nodes_parsed); printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); } @@ -251,10 +197,8 @@ update_nodes_add(int node, unsigned long start, unsigned long end) void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { - struct bootnode *nd, oldnode; unsigned long start, end; int node, pxm; - int i; if (srat_disabled()) return; @@ -276,300 +220,31 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) bad_srat(); return; } - i = conflicting_memblks(start, end); - if (i == node) { - printk(KERN_WARNING - "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n", - pxm, start, end, nodes[i].start, nodes[i].end); - } else if (i >= 0) { - printk(KERN_ERR - "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n", - pxm, start, end, node_to_pxm(i), - nodes[i].start, nodes[i].end); + + if (numa_add_memblk(node, start, end) < 0) { bad_srat(); return; } - nd = &nodes[node]; - oldnode = *nd; - if (!node_test_and_set(node, nodes_parsed)) { - nd->start = start; - nd->end = end; - } else { - if (start < nd->start) - nd->start = start; - if (nd->end < end) - nd->end = end; - } printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, start, end); - if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) { + if (ma->flags & 
ACPI_SRAT_MEM_HOT_PLUGGABLE) update_nodes_add(node, start, end); - /* restore nodes[node] */ - *nd = oldnode; - if ((nd->start | nd->end) == 0) - node_clear(node, nodes_parsed); - } - - node_memblk_range[num_node_memblks].start = start; - node_memblk_range[num_node_memblks].end = end; - memblk_nodeid[num_node_memblks] = node; - num_node_memblks++; -} - -/* Sanity check to catch more bad SRATs (they are amazingly common). - Make sure the PXMs cover all memory. */ -static int __init nodes_cover_memory(const struct bootnode *nodes) -{ - int i; - unsigned long pxmram, e820ram; - - pxmram = 0; - for_each_node_mask(i, nodes_parsed) { - unsigned long s = nodes[i].start >> PAGE_SHIFT; - unsigned long e = nodes[i].end >> PAGE_SHIFT; - pxmram += e - s; - pxmram -= __absent_pages_in_range(i, s, e); - if ((long)pxmram < 0) - pxmram = 0; - } - - e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT); - /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ - if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { - printk(KERN_ERR - "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n", - (pxmram << PAGE_SHIFT) >> 20, - (e820ram << PAGE_SHIFT) >> 20); - return 0; - } - return 1; } void __init acpi_numa_arch_fixup(void) {} -#ifdef CONFIG_NUMA_EMU -void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, - unsigned long end) -{ - int i; - - for_each_node_mask(i, nodes_parsed) { - cutoff_node(i, start, end); - physnodes[i].start = nodes[i].start; - physnodes[i].end = nodes[i].end; - } -} -#endif /* CONFIG_NUMA_EMU */ - -/* Use the information discovered above to actually set up the nodes. */ -int __init acpi_scan_nodes(unsigned long start, unsigned long end) +int __init x86_acpi_numa_init(void) { - int i; - - if (acpi_numa <= 0) - return -1; - - /* First clean up the node list */ - for (i = 0; i < MAX_NUMNODES; i++) - cutoff_node(i, start, end); - - /* - * Join together blocks on the same node, holes between - * which don't overlap with memory on other nodes. - */ - for (i = 0; i < num_node_memblks; ++i) { - int j, k; - - for (j = i + 1; j < num_node_memblks; ++j) { - unsigned long start, end; - - if (memblk_nodeid[i] != memblk_nodeid[j]) - continue; - start = min(node_memblk_range[i].end, - node_memblk_range[j].end); - end = max(node_memblk_range[i].start, - node_memblk_range[j].start); - for (k = 0; k < num_node_memblks; ++k) { - if (memblk_nodeid[i] == memblk_nodeid[k]) - continue; - if (start < node_memblk_range[k].end && - end > node_memblk_range[k].start) - break; - } - if (k < num_node_memblks) - continue; - start = min(node_memblk_range[i].start, - node_memblk_range[j].start); - end = max(node_memblk_range[i].end, - node_memblk_range[j].end); - printk(KERN_INFO "SRAT: Node %d " - "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", - memblk_nodeid[i], - node_memblk_range[i].start, - node_memblk_range[i].end, - node_memblk_range[j].start, - node_memblk_range[j].end, - start, end); - node_memblk_range[i].start = start; - node_memblk_range[i].end = end; - k = --num_node_memblks - j; - memmove(memblk_nodeid + j, memblk_nodeid + j+1, - k * sizeof(*memblk_nodeid)); - memmove(node_memblk_range + j, node_memblk_range + j+1, - k * sizeof(*node_memblk_range)); - --j; - } - } - - memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, - memblk_nodeid); - if (memnode_shift < 0) { - printk(KERN_ERR - "SRAT: No NUMA node hash function found. 
Contact maintainer\n"); - bad_srat(); - return -1; - } - - for (i = 0; i < num_node_memblks; i++) - memblock_x86_register_active_regions(memblk_nodeid[i], - node_memblk_range[i].start >> PAGE_SHIFT, - node_memblk_range[i].end >> PAGE_SHIFT); - - /* for out of order entries in SRAT */ - sort_node_map(); - if (!nodes_cover_memory(nodes)) { - bad_srat(); - return -1; - } + int ret; - /* Account for nodes with cpus and no memory */ - nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed); - - /* Finally register nodes */ - for_each_node_mask(i, node_possible_map) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - /* Try again in case setup_node_bootmem missed one due - to missing bootmem */ - for_each_node_mask(i, node_possible_map) - if (!node_online(i)) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - - for (i = 0; i < nr_cpu_ids; i++) { - int node = early_cpu_to_node(i); - - if (node == NUMA_NO_NODE) - continue; - if (!node_online(node)) - numa_clear_node(i); - } - numa_init_array(); - return 0; -} - -#ifdef CONFIG_NUMA_EMU -static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { - [0 ... MAX_NUMNODES-1] = PXM_INVAL -}; -static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { - [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; -static int __init find_node_by_addr(unsigned long addr) -{ - int ret = NUMA_NO_NODE; - int i; - - for_each_node_mask(i, nodes_parsed) { - /* - * Find the real node that this emulated node appears on. For - * the sake of simplicity, we only use a real node's starting - * address to determine which emulated node it appears on. - */ - if (addr >= nodes[i].start && addr < nodes[i].end) { - ret = i; - break; - } - } - return ret; + ret = acpi_numa_init(); + if (ret < 0) + return ret; + return srat_disabled() ? -EINVAL : 0; } -/* - * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID - * mappings that respect the real ACPI topology but reflect our emulated - * environment. For each emulated node, we find which real node it appears on - * and create PXM to NID mappings for those fake nodes which mirror that - * locality. SLIT will now represent the correct distances between emulated - * nodes as a result of the real topology. - */ -void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) -{ - int i, j; - - for (i = 0; i < num_nodes; i++) { - int nid, pxm; - - nid = find_node_by_addr(fake_nodes[i].start); - if (nid == NUMA_NO_NODE) - continue; - pxm = node_to_pxm(nid); - if (pxm == PXM_INVAL) - continue; - fake_node_to_pxm_map[i] = pxm; - /* - * For each apicid_to_node mapping that exists for this real - * node, it must now point to the fake node ID. - */ - for (j = 0; j < MAX_LOCAL_APIC; j++) - if (apicid_to_node[j] == nid && - fake_apicid_to_node[j] == NUMA_NO_NODE) - fake_apicid_to_node[j] = i; - } - - /* - * If there are apicid-to-node mappings for physical nodes that do not - * have a corresponding emulated node, it should default to a guaranteed - * value. 
- */ - for (i = 0; i < MAX_LOCAL_APIC; i++) - if (apicid_to_node[i] != NUMA_NO_NODE && - fake_apicid_to_node[i] == NUMA_NO_NODE) - fake_apicid_to_node[i] = 0; - - for (i = 0; i < num_nodes; i++) - __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); - memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); - - nodes_clear(nodes_parsed); - for (i = 0; i < num_nodes; i++) - if (fake_nodes[i].start != fake_nodes[i].end) - node_set(i, nodes_parsed); -} - -static int null_slit_node_compare(int a, int b) -{ - return node_to_pxm(a) == node_to_pxm(b); -} -#else -static int null_slit_node_compare(int a, int b) -{ - return a == b; -} -#endif /* CONFIG_NUMA_EMU */ - -int __node_distance(int a, int b) -{ - int index; - - if (!acpi_slit) - return null_slit_node_compare(a, b) ? LOCAL_DISTANCE : - REMOTE_DISTANCE; - index = acpi_slit->locality_count * node_to_pxm(a); - return acpi_slit->entry[index + node_to_pxm(b)]; -} - -EXPORT_SYMBOL(__node_distance); - #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) int memory_add_physaddr_to_nid(u64 start) { diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6acc724d5d8f..d6c0418c3e47 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, sender = this_cpu_read(tlb_vector_offset); f = &flush_state[sender]; - /* - * Could avoid this lock when - * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is - * probably not worth checking this for a cache-hot lock. - */ - raw_spin_lock(&f->tlbstate_lock); + if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) + raw_spin_lock(&f->tlbstate_lock); f->flush_mm = mm; f->flush_va = va; @@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, f->flush_mm = NULL; f->flush_va = 0; - raw_spin_unlock(&f->tlbstate_lock); + if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) + raw_spin_unlock(&f->tlbstate_lock); } void native_flush_tlb_others(const struct cpumask *cpumask, @@ -211,11 +208,10 @@ void native_flush_tlb_others(const struct cpumask *cpumask, if (is_uv_system()) { unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); if (cpumask) flush_tlb_others_ipi(cpumask, mm, va); - put_cpu(); return; } flush_tlb_others_ipi(cpumask, mm, va); diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index e27dffbbb1a7..026e4931d162 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void) #define ENABLE_CF8_EXT_CFG (1ULL << 46) -static void enable_pci_io_ecs(void *unused) +static void __cpuinit enable_pci_io_ecs(void *unused) { u64 reg; rdmsrl(MSR_AMD64_NB_CFG, reg); diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 85b68ef5e809..67858be4b52b 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c @@ -34,6 +34,7 @@ #include <linux/pci.h> #include <linux/init.h> +#include <asm/ce4100.h> #include <asm/pci_x86.h> struct sim_reg { @@ -254,7 +255,7 @@ int bridge_read(unsigned int devfn, int reg, int len, u32 *value) static int ce4100_conf_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { - int i, retval = 1; + int i; if (bus == 1) { for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { @@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = { .write = ce4100_conf_write, }; -static int __init ce4100_pci_init(void) +int __init ce4100_pci_init(void) { init_sim_regs(); raw_pci_ops = &ce4100_pci_conf; 
- return 0; + /* Indicate caller that it should invoke pci_legacy_init() */ + return 1; } -subsys_initcall(ce4100_pci_init); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 25cd4a07d09f..8c4085a95ef1 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -20,7 +20,8 @@ #include <asm/xen/pci.h> #ifdef CONFIG_ACPI -static int xen_hvm_register_pirq(u32 gsi, int triggering) +static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, + int trigger, int polarity) { int rc, irq; struct physdev_map_pirq map_irq; @@ -41,7 +42,7 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) return -1; } - if (triggering == ACPI_EDGE_SENSITIVE) { + if (trigger == ACPI_EDGE_SENSITIVE) { shareable = 0; name = "ioapic-edge"; } else { @@ -55,12 +56,6 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) return irq; } - -static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, - int trigger, int polarity) -{ - return xen_hvm_register_pirq(gsi, trigger); -} #endif #if defined(CONFIG_PCI_MSI) @@ -91,7 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { - int irq, pirq, ret = 0; + int irq, pirq; struct msi_desc *msidesc; struct msi_msg msg; @@ -99,39 +94,32 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) __read_msi_msg(msidesc, &msg); pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) { - xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? - "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ); - if (irq < 0) + if (msg.data != XEN_PIRQ_MSI_DATA || + xen_irq_from_pirq(pirq) < 0) { + pirq = xen_allocate_pirq_msi(dev, msidesc); + if (pirq < 0) goto error; - ret = set_irq_msi(irq, msidesc); - if (ret < 0) - goto error_while; - printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d" - " pirq=%d\n", irq, pirq); - return 0; + xen_msi_compose_msg(dev, pirq, &msg); + __write_msi_msg(msidesc, &msg); + dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); + } else { + dev_dbg(&dev->dev, + "xen: msi already bound to pirq=%d\n", pirq); } - xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? - "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ)); - if (irq < 0 || pirq < 0) + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, + (type == PCI_CAP_ID_MSIX) ? 
+ "msi-x" : "msi"); + if (irq < 0) goto error; - printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq); - xen_msi_compose_msg(dev, pirq, &msg); - ret = set_irq_msi(irq, msidesc); - if (ret < 0) - goto error_while; - write_msi_msg(irq, &msg); + dev_dbg(&dev->dev, + "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); } return 0; -error_while: - unbind_from_irqhandler(irq, NULL); error: - if (ret == -ENODEV) - dev_err(&dev->dev, "Xen PCI frontend has not registered" \ - " MSI/MSI-X support!\n"); - - return ret; + dev_err(&dev->dev, + "Xen PCI frontend has not registered MSI/MSI-X support!\n"); + return -ENODEV; } /* @@ -150,35 +138,26 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return -ENOMEM; if (type == PCI_CAP_ID_MSIX) - ret = xen_pci_frontend_enable_msix(dev, &v, nvec); + ret = xen_pci_frontend_enable_msix(dev, v, nvec); else - ret = xen_pci_frontend_enable_msi(dev, &v); + ret = xen_pci_frontend_enable_msi(dev, v); if (ret) goto error; i = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_allocate_pirq(v[i], 0, /* not sharable */ - (type == PCI_CAP_ID_MSIX) ? - "pcifront-msi-x" : "pcifront-msi"); - if (irq < 0) { - ret = -1; + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, + (type == PCI_CAP_ID_MSIX) ? + "pcifront-msi-x" : + "pcifront-msi"); + if (irq < 0) goto free; - } - - ret = set_irq_msi(irq, msidesc); - if (ret) - goto error_while; i++; } kfree(v); return 0; -error_while: - unbind_from_irqhandler(irq, NULL); error: - if (ret == -ENODEV) - dev_err(&dev->dev, "Xen PCI frontend has not registered" \ - " MSI/MSI-X support!\n"); + dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); free: kfree(v); return ret; @@ -193,6 +172,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) xen_pci_frontend_disable_msix(dev); else xen_pci_frontend_disable_msi(dev); + + /* Free the IRQ's and the msidesc using the generic code. */ + default_teardown_msi_irqs(dev); } static void xen_teardown_msi_irq(unsigned int irq) @@ -200,47 +182,82 @@ static void xen_teardown_msi_irq(unsigned int irq) xen_destroy_irq(irq); } +#ifdef CONFIG_XEN_DOM0 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { - int irq, ret; + int ret = 0; struct msi_desc *msidesc; list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_create_msi_irq(dev, msidesc, type); - if (irq < 0) - return -1; + struct physdev_map_pirq map_irq; - ret = set_irq_msi(irq, msidesc); - if (ret) - goto error; - } - return 0; + memset(&map_irq, 0, sizeof(map_irq)); + map_irq.domid = DOMID_SELF; + map_irq.type = MAP_PIRQ_TYPE_MSI; + map_irq.index = -1; + map_irq.pirq = -1; + map_irq.bus = dev->bus->number; + map_irq.devfn = dev->devfn; -error: - xen_destroy_irq(irq); + if (type == PCI_CAP_ID_MSIX) { + int pos; + u32 table_offset, bir; + + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + + pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, + &table_offset); + bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); + + map_irq.table_base = pci_resource_start(dev, bir); + map_irq.entry_nr = msidesc->msi_attrib.entry_nr; + } + + ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); + if (ret) { + dev_warn(&dev->dev, "xen map irq failed %d\n", ret); + goto out; + } + + ret = xen_bind_pirq_msi_to_irq(dev, msidesc, + map_irq.pirq, map_irq.index, + (type == PCI_CAP_ID_MSIX) ? 
+ "msi-x" : "msi"); + if (ret < 0) + goto out; + } + ret = 0; +out: return ret; } #endif +#endif static int xen_pcifront_enable_irq(struct pci_dev *dev) { int rc; int share = 1; + u8 gsi; - dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); - - if (dev->irq < 0) - return -EINVAL; + rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", + rc); + return rc; + } - if (dev->irq < NR_IRQS_LEGACY) + if (gsi < NR_IRQS_LEGACY) share = 0; - rc = xen_allocate_pirq(dev->irq, share, "pcifront"); + rc = xen_allocate_pirq(gsi, share, "pcifront"); if (rc < 0) { - dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", - dev->irq, rc); + dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n", + gsi, rc); return rc; } + + dev->irq = rc; + dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); return 0; } diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index d2c0d51a7178..28071bb31db7 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -15,21 +15,20 @@ #include <linux/serial_reg.h> #include <linux/serial_8250.h> +#include <asm/ce4100.h> +#include <asm/prom.h> #include <asm/setup.h> +#include <asm/i8259.h> #include <asm/io.h> +#include <asm/io_apic.h> static int ce4100_i8042_detect(void) { return 0; } -static void __init sdv_find_smp_config(void) -{ -} - #ifdef CONFIG_SERIAL_8250 - static unsigned int mem_serial_in(struct uart_port *p, int offset) { offset = offset << p->regshift; @@ -118,6 +117,15 @@ static void __init sdv_arch_setup(void) sdv_serial_fixup(); } +#ifdef CONFIG_X86_IO_APIC +static void __cpuinit sdv_pci_init(void) +{ + x86_of_pci_init(); + /* We can't set this earlier, because we need to calibrate the timer */ + legacy_pic = &null_legacy_pic; +} +#endif + /* * CE4100 specific x86_init function overrides and early setup * calls. @@ -128,5 +136,11 @@ void __init x86_ce4100_early_setup(void) x86_platform.i8042_detect = ce4100_i8042_detect; x86_init.resources.probe_roms = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; - x86_init.mpparse.find_smp_config = sdv_find_smp_config; + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.pci.init = ce4100_pci_init; + +#ifdef CONFIG_X86_IO_APIC + x86_init.pci.init_irq = sdv_pci_init; + x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; +#endif } diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts new file mode 100644 index 000000000000..dc701ea58546 --- /dev/null +++ b/arch/x86/platform/ce4100/falconfalls.dts @@ -0,0 +1,428 @@ +/* + * CE4100 on Falcon Falls + * + * (c) Copyright 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. 
+ */ +/dts-v1/; +/ { + model = "intel,falconfalls"; + compatible = "intel,falconfalls"; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "intel,ce4100"; + reg = <0>; + lapic = <&lapic0>; + }; + }; + + soc@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "intel,ce4100-cp"; + ranges; + + ioapic1: interrupt-controller@fec00000 { + #interrupt-cells = <2>; + compatible = "intel,ce4100-ioapic"; + interrupt-controller; + reg = <0xfec00000 0x1000>; + }; + + timer@fed00000 { + compatible = "intel,ce4100-hpet"; + reg = <0xfed00000 0x200>; + }; + + lapic0: interrupt-controller@fee00000 { + compatible = "intel,ce4100-lapic"; + reg = <0xfee00000 0x1000>; + }; + + pci@3fc { + #address-cells = <3>; + #size-cells = <2>; + compatible = "intel,ce4100-pci", "pci"; + device_type = "pci"; + bus-range = <0 0>; + ranges = <0x2000000 0 0xbffff000 0xbffff000 0 0x1000 + 0x2000000 0 0xdffe0000 0xdffe0000 0 0x1000 + 0x0000000 0 0x0 0x0 0 0x100>; + + /* Secondary IO-APIC */ + ioapic2: interrupt-controller@0,1 { + #interrupt-cells = <2>; + compatible = "intel,ce4100-ioapic"; + interrupt-controller; + reg = <0x100 0x0 0x0 0x0 0x0>; + assigned-addresses = <0x02000000 0x0 0xbffff000 0x0 0x1000>; + }; + + pci@1,0 { + #address-cells = <3>; + #size-cells = <2>; + compatible = "intel,ce4100-pci", "pci"; + device_type = "pci"; + bus-range = <1 1>; + ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; + + interrupt-parent = <&ioapic2>; + + display@2,0 { + compatible = "pci8086,2e5b.2", + "pci8086,2e5b", + "pciclass038000", + "pciclass0380"; + + reg = <0x11000 0x0 0x0 0x0 0x0>; + interrupts = <0 1>; + }; + + multimedia@3,0 { + compatible = "pci8086,2e5c.2", + "pci8086,2e5c", + "pciclass048000", + "pciclass0480"; + + reg = <0x11800 0x0 0x0 0x0 0x0>; + interrupts = <2 1>; + }; + + multimedia@4,0 { + compatible = "pci8086,2e5d.2", + "pci8086,2e5d", + "pciclass048000", + "pciclass0480"; + + reg = <0x12000 0x0 0x0 0x0 0x0>; + interrupts = <4 1>; + }; + + multimedia@4,1 { + compatible = "pci8086,2e5e.2", + "pci8086,2e5e", + "pciclass048000", + "pciclass0480"; + + reg = <0x12100 0x0 0x0 0x0 0x0>; + interrupts = <5 1>; + }; + + sound@6,0 { + compatible = "pci8086,2e5f.2", + "pci8086,2e5f", + "pciclass040100", + "pciclass0401"; + + reg = <0x13000 0x0 0x0 0x0 0x0>; + interrupts = <6 1>; + }; + + sound@6,1 { + compatible = "pci8086,2e5f.2", + "pci8086,2e5f", + "pciclass040100", + "pciclass0401"; + + reg = <0x13100 0x0 0x0 0x0 0x0>; + interrupts = <7 1>; + }; + + sound@6,2 { + compatible = "pci8086,2e60.2", + "pci8086,2e60", + "pciclass040100", + "pciclass0401"; + + reg = <0x13200 0x0 0x0 0x0 0x0>; + interrupts = <8 1>; + }; + + display@8,0 { + compatible = "pci8086,2e61.2", + "pci8086,2e61", + "pciclass038000", + "pciclass0380"; + + reg = <0x14000 0x0 0x0 0x0 0x0>; + interrupts = <9 1>; + }; + + display@8,1 { + compatible = "pci8086,2e62.2", + "pci8086,2e62", + "pciclass038000", + "pciclass0380"; + + reg = <0x14100 0x0 0x0 0x0 0x0>; + interrupts = <10 1>; + }; + + multimedia@8,2 { + compatible = "pci8086,2e63.2", + "pci8086,2e63", + "pciclass048000", + "pciclass0480"; + + reg = <0x14200 0x0 0x0 0x0 0x0>; + interrupts = <11 1>; + }; + + entertainment-encryption@9,0 { + compatible = "pci8086,2e64.2", + "pci8086,2e64", + "pciclass101000", + "pciclass1010"; + + reg = <0x14800 0x0 0x0 0x0 0x0>; + interrupts = <12 1>; + }; + + localbus@a,0 { + compatible = "pci8086,2e65.2", + "pci8086,2e65", + "pciclassff0000", + 
"pciclassff00"; + + reg = <0x15000 0x0 0x0 0x0 0x0>; + }; + + serial@b,0 { + compatible = "pci8086,2e66.2", + "pci8086,2e66", + "pciclass070003", + "pciclass0700"; + + reg = <0x15800 0x0 0x0 0x0 0x0>; + interrupts = <14 1>; + }; + + gpio@b,1 { + compatible = "pci8086,2e67.2", + "pci8086,2e67", + "pciclassff0000", + "pciclassff00"; + + #gpio-cells = <2>; + reg = <0x15900 0x0 0x0 0x0 0x0>; + interrupts = <15 1>; + gpio-controller; + }; + + i2c-controller@b,2 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "pci8086,2e68.2", + "pci8086,2e68", + "pciclass,ff0000", + "pciclass,ff00"; + + reg = <0x15a00 0x0 0x0 0x0 0x0>; + interrupts = <16 1>; + ranges = <0 0 0x02000000 0 0xdffe0500 0x100 + 1 0 0x02000000 0 0xdffe0600 0x100 + 2 0 0x02000000 0 0xdffe0700 0x100>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,ce4100-i2c-controller"; + reg = <0 0 0x100>; + }; + + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,ce4100-i2c-controller"; + reg = <1 0 0x100>; + + gpio@26 { + #gpio-cells = <2>; + compatible = "ti,pcf8575"; + reg = <0x26>; + gpio-controller; + }; + }; + + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,ce4100-i2c-controller"; + reg = <2 0 0x100>; + + gpio@26 { + #gpio-cells = <2>; + compatible = "ti,pcf8575"; + reg = <0x26>; + gpio-controller; + }; + }; + }; + + smard-card@b,3 { + compatible = "pci8086,2e69.2", + "pci8086,2e69", + "pciclass070500", + "pciclass0705"; + + reg = <0x15b00 0x0 0x0 0x0 0x0>; + interrupts = <15 1>; + }; + + spi-controller@b,4 { + #address-cells = <1>; + #size-cells = <0>; + compatible = + "pci8086,2e6a.2", + "pci8086,2e6a", + "pciclass,ff0000", + "pciclass,ff00"; + + reg = <0x15c00 0x0 0x0 0x0 0x0>; + interrupts = <15 1>; + + dac@0 { + compatible = "ti,pcm1755"; + reg = <0>; + spi-max-frequency = <115200>; + }; + + dac@1 { + compatible = "ti,pcm1609a"; + reg = <1>; + spi-max-frequency = <115200>; + }; + + eeprom@2 { + compatible = "atmel,at93c46"; + reg = <2>; + spi-max-frequency = <115200>; + }; + }; + + multimedia@b,7 { + compatible = "pci8086,2e6d.2", + "pci8086,2e6d", + "pciclassff0000", + "pciclassff00"; + + reg = <0x15f00 0x0 0x0 0x0 0x0>; + }; + + ethernet@c,0 { + compatible = "pci8086,2e6e.2", + "pci8086,2e6e", + "pciclass020000", + "pciclass0200"; + + reg = <0x16000 0x0 0x0 0x0 0x0>; + interrupts = <21 1>; + }; + + clock@c,1 { + compatible = "pci8086,2e6f.2", + "pci8086,2e6f", + "pciclassff0000", + "pciclassff00"; + + reg = <0x16100 0x0 0x0 0x0 0x0>; + interrupts = <3 1>; + }; + + usb@d,0 { + compatible = "pci8086,2e70.2", + "pci8086,2e70", + "pciclass0c0320", + "pciclass0c03"; + + reg = <0x16800 0x0 0x0 0x0 0x0>; + interrupts = <22 3>; + }; + + usb@d,1 { + compatible = "pci8086,2e70.2", + "pci8086,2e70", + "pciclass0c0320", + "pciclass0c03"; + + reg = <0x16900 0x0 0x0 0x0 0x0>; + interrupts = <22 3>; + }; + + sata@e,0 { + compatible = "pci8086,2e71.0", + "pci8086,2e71", + "pciclass010601", + "pciclass0106"; + + reg = <0x17000 0x0 0x0 0x0 0x0>; + interrupts = <23 3>; + }; + + flash@f,0 { + compatible = "pci8086,701.1", + "pci8086,701", + "pciclass050100", + "pciclass0501"; + + reg = <0x17800 0x0 0x0 0x0 0x0>; + interrupts = <13 1>; + }; + + entertainment-encryption@10,0 { + compatible = "pci8086,702.1", + "pci8086,702", + "pciclass101000", + "pciclass1010"; + + reg = <0x18000 0x0 0x0 0x0 0x0>; + }; + + co-processor@11,0 { + compatible = "pci8086,703.1", + "pci8086,703", + "pciclass0b4000", + "pciclass0b40"; + + reg = <0x18800 0x0 0x0 0x0 0x0>; + interrupts 
= <1 1>; + }; + + multimedia@12,0 { + compatible = "pci8086,704.0", + "pci8086,704", + "pciclass048000", + "pciclass0480"; + + reg = <0x19000 0x0 0x0 0x0 0x0>; + }; + }; + + isa@1f,0 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "isa"; + ranges = <1 0 0 0 0 0x100>; + + rtc@70 { + compatible = "intel,ce4100-rtc", "motorola,mc146818"; + interrupts = <8 3>; + interrupt-parent = <&ioapic1>; + ctrl-reg = <2>; + freq-reg = <0x26>; + reg = <1 0x70 2>; + }; + }; + }; + }; +}; diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index ea6529e93c6f..5c0207bf959b 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -31,6 +31,7 @@ #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/mrst.h> +#include <asm/mrst-vrtc.h> #include <asm/io.h> #include <asm/i8259.h> #include <asm/intel_scu_ipc.h> @@ -268,6 +269,7 @@ void __init x86_mrst_early_setup(void) x86_platform.calibrate_tsc = mrst_calibrate_tsc; x86_platform.i8042_detect = mrst_i8042_detect; + x86_init.timers.wallclock_init = mrst_rtc_init; x86_init.pci.init = pci_mrst_init; x86_init.pci.fixup_irqs = x86_init_noop; diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 32cd7edd71a0..04cf645feb92 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c @@ -100,22 +100,14 @@ int vrtc_set_mmss(unsigned long nowtime) void __init mrst_rtc_init(void) { - unsigned long rtc_paddr; - void __iomem *virt_base; + unsigned long vrtc_paddr = sfi_mrtc_array[0].phys_addr; sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); - if (!sfi_mrtc_num) + if (!sfi_mrtc_num || !vrtc_paddr) return; - rtc_paddr = sfi_mrtc_array[0].phys_addr; - - /* vRTC's register address may not be page aligned */ - set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr); - - virt_base = (void __iomem *)__fix_to_virt(FIX_LNW_VRTC); - virt_base += rtc_paddr & ~PAGE_MASK; - vrtc_virt_base = virt_base; - + vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC, + vrtc_paddr); x86_platform.get_wallclock = vrtc_get_time; x86_platform.set_wallclock = vrtc_set_mmss; } diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile index e797428b163b..c2a8cab65e5d 100644 --- a/arch/x86/platform/olpc/Makefile +++ b/arch/x86/platform/olpc/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_OLPC) += olpc.o obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o -obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o -obj-$(CONFIG_OLPC_OPENFIRMWARE_DT) += olpc_dt.o +obj-$(CONFIG_OLPC) += olpc_ofw.o +obj-$(CONFIG_OF_PROMTREE) += olpc_dt.o diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index df58e9cad96a..a7b38d35c29a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub + * base_dest_nodeid is the nasid of the first uvhub * in the partition. The bit map will indicate uvhub numbers, * which are 0-N in a partition. Pnodes are unique system-wide. 
*/ - bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; + bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); bd2->header.dest_subnodeid = 0x10; /* the LB */ bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 7b24460917d5..374a05d8ad22 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); - struct irq_cfg *cfg = get_irq_chip_data(irq); + struct irq_cfg *cfg = irq_get_chip_data(irq); unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; int mmr_pnode, err; @@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, else irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, + irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); mmr_value = 0; diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c index 632037671746..fe4cf8294878 100644 --- a/arch/x86/platform/visws/visws_quirks.c +++ b/arch/x86/platform/visws/visws_quirks.c @@ -569,11 +569,13 @@ out_unlock: static struct irqaction master_action = { .handler = piix4_master_intr, .name = "PIIX4-8259", + .flags = IRQF_NO_THREAD, }; static struct irqaction cascade_action = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static inline void set_piix4_virtual_irq_type(void) @@ -606,7 +608,7 @@ static void __init visws_pre_intr_init(void) chip = &cobalt_irq_type; if (chip) - set_irq_chip(i, chip); + irq_set_chip(i, chip); } setup_irq(CO_IRQ_8259, &master_action); diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 5b54892e4bc3..1c7121ba18ff 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -38,7 +38,7 @@ config XEN_MAX_DOMAIN_MEMORY config XEN_SAVE_RESTORE bool - depends on XEN && PM + depends on XEN default y config XEN_DEBUG_FS @@ -48,3 +48,11 @@ config XEN_DEBUG_FS help Enable statistics output and various tuning options in debugfs. Enabling this option may incur a significant performance overhead. + +config XEN_DEBUG + bool "Enable Xen debug checks" + depends on XEN + default n + help + Enable various WARN_ON checks in the Xen MMU code. + Enabling this option WILL incur a significant performance overhead. 
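/*
 * The Xen hunks that follow (enlighten.c, mmu.c, p2m.c, setup.c) all revolve
 * around one idea: 1-1 ("identity") p2m entries are stored with a marker bit
 * set, so a lookup can tell a deliberate pfn==mfn mapping apart from a missing
 * entry or an ordinary translation.  Below is a minimal, self-contained
 * userspace sketch of that encoding; the single flat table, the bit position
 * and the helper names are illustrative assumptions only, not the kernel's
 * actual data structures (which use a three-level tree with shared leaf pages).
 */
#include <stdio.h>

#define MAX_PFN              64UL
#define INVALID_P2M_ENTRY    (~0UL)
#define IDENTITY_FRAME_BIT   (1UL << (sizeof(unsigned long) * 8 - 2))
#define IDENTITY_FRAME(pfn)  ((pfn) | IDENTITY_FRAME_BIT)

static unsigned long p2m[MAX_PFN];

/* Mark [pfn_s, pfn_e) as identity mapped, tagging each entry with the bit. */
static void set_identity_range(unsigned long pfn_s, unsigned long pfn_e)
{
        for (unsigned long pfn = pfn_s; pfn < pfn_e && pfn < MAX_PFN; pfn++)
                p2m[pfn] = IDENTITY_FRAME(pfn);
}

/* Lookup: strip the marker from identity entries, pass others through.
 * The INVALID_P2M_ENTRY test must come first, since ~0UL also has the
 * identity bit set. */
static unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn = (pfn < MAX_PFN) ? p2m[pfn] : INVALID_P2M_ENTRY;

        if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT))
                return mfn & ~IDENTITY_FRAME_BIT;
        return mfn;
}

int main(void)
{
        for (unsigned long pfn = 0; pfn < MAX_PFN; pfn++)
                p2m[pfn] = INVALID_P2M_ENTRY;   /* everything starts "missing" */

        p2m[3] = 100;                   /* a normal translation: pfn 3 -> mfn 100 */
        set_identity_range(16, 32);     /* e.g. a PCI hole or E820 gap */

        printf("pfn 3  -> mfn %lu\n", pfn_to_mfn(3));   /* 100 */
        printf("pfn 20 -> mfn %lu\n", pfn_to_mfn(20));  /* 20, identity */
        printf("pfn 40 -> mfn %#lx\n", pfn_to_mfn(40)); /* still missing */
        return 0;
}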
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 50542efe45fb..49dbd78ec3cb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1284,15 +1284,14 @@ static int init_hvm_pv_info(int *major, int *minor) xen_setup_features(); - pv_info = xen_info; - pv_info.kernel_rpl = 0; + pv_info.name = "Xen HVM"; xen_domain_type = XEN_HVM_DOMAIN; return 0; } -void xen_hvm_init_shared_info(void) +void __ref xen_hvm_init_shared_info(void) { int cpu; struct xen_add_to_physmap xatp; @@ -1331,6 +1330,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; + if (xen_have_vector_callback) + xen_init_lock_cpu(cpu); break; default: break; @@ -1355,6 +1356,7 @@ static void __init xen_hvm_guest_init(void) if (xen_feature(XENFEAT_hvm_callback_vector)) xen_have_vector_callback = 1; + xen_hvm_smp_init(); register_cpu_notifier(&xen_hvm_cpu_notifier); xen_unplug_emulated_devices(); have_vcpu_info_placement = 0; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61ad574..3f6f3347aa17 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -46,6 +46,7 @@ #include <linux/module.h> #include <linux/gfp.h> #include <linux/memblock.h> +#include <linux/seq_file.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> @@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) if (val & _PAGE_PRESENT) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; - unsigned long mfn = pfn_to_mfn(pfn); + unsigned long mfn; + if (!xen_feature(XENFEAT_auto_translated_physmap)) + mfn = get_phys_to_machine(pfn); + else + mfn = pfn; /* * If there's no mfn for the pfn, then just create an * empty non-present pte. Unfortunately this loses @@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) if (unlikely(mfn == INVALID_P2M_ENTRY)) { mfn = 0; flags = 0; + } else { + /* + * Paramount to do this test _after_ the + * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY & + * IDENTITY_FRAME_BIT resolves to true. 
+ */ + mfn &= ~FOREIGN_FRAME_BIT; + if (mfn & IDENTITY_FRAME_BIT) { + mfn &= ~IDENTITY_FRAME_BIT; + flags |= _PAGE_IOMAP; + } } - val = ((pteval_t)mfn << PAGE_SHIFT) | flags; } @@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); +#ifdef CONFIG_XEN_DEBUG +pte_t xen_make_pte_debug(pteval_t pte) +{ + phys_addr_t addr = (pte & PTE_PFN_MASK); + phys_addr_t other_addr; + bool io_page = false; + pte_t _pte; + + if (pte & _PAGE_IOMAP) + io_page = true; + + _pte = xen_make_pte(pte); + + if (!addr) + return _pte; + + if (io_page && + (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { + other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; + WARN(addr != other_addr, + "0x%lx is using VM_IO, but it is 0x%lx!\n", + (unsigned long)addr, (unsigned long)other_addr); + } else { + pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; + other_addr = (_pte.pte & PTE_PFN_MASK); + WARN((addr == other_addr) && (!io_page) && (!iomap_set), + "0x%lx is missing VM_IO (and wasn't fixed)!\n", + (unsigned long)addr); + } + + return _pte; +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); +#endif + pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); @@ -986,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm) */ void xen_mm_pin_all(void) { - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (!PagePinned(page)) { @@ -998,7 +1047,7 @@ void xen_mm_pin_all(void) } } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } /* @@ -1099,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) */ void xen_mm_unpin_all(void) { - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (PageSavePinned(page)) { @@ -1112,7 +1160,7 @@ void xen_mm_unpin_all(void) } } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) @@ -1443,7 +1491,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) * early_ioremap fixmap slot, make sure it is RO. 
 */ if (!is_early_ioremap_ptep(ptep) && - pfn >= e820_table_start && pfn < e820_table_end) + pfn >= pgt_buf_start && pfn < pgt_buf_end) pte = pte_wrprotect(pte); return pte; @@ -1942,6 +1990,9 @@ __init void xen_ident_map_ISA(void) static __init void xen_post_allocator_init(void) { +#ifdef CONFIG_XEN_DEBUG + pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); +#endif pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; @@ -2074,7 +2125,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, in_frames[i] = virt_to_mfn(vaddr); MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); - set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); + __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); if (out_frames) out_frames[i] = virt_to_pfn(vaddr); @@ -2353,6 +2404,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); #ifdef CONFIG_XEN_DEBUG_FS +static int p2m_dump_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, p2m_dump_show, NULL); +} + +static const struct file_operations p2m_dump_fops = { + .open = p2m_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static struct dentry *d_mmu_debug; static int __init xen_mmu_debugfs(void) @@ -2408,6 +2471,7 @@ static int __init xen_mmu_debugfs(void) debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, &mmu_stats.prot_commit_batched); + debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); return 0; } fs_initcall(xen_mmu_debugfs); diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index fd12d7ce7ff9..215a3ce61068 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -23,6 +23,129 @@ * P2M_PER_PAGE depends on the architecture, as a mfn is always * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to * 512 and 1024 entries respectively. + * + * In short, these structures contain the Machine Frame Number (MFN) of the PFN. + * + * However not all entries are filled with MFNs. Specifically for all other + * leaf entries, or for the top root, or middle one, for which there is a void + * entry, we assume it is "missing". So (for example) + * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. + * + * We also have the possibility of setting 1-1 mappings on certain regions, so + * that: + * pfn_to_mfn(0xc0000)=0xc0000 + * + * The benefit of this is that we can assume for non-RAM regions (think + * PCI BARs, or ACPI spaces), we can create mappings easily b/c we + * get the PFN value to match the MFN. + * + * For this to work efficiently we have one new page p2m_identity and + * allocate (via reserve_brk) any other pages we need to cover the sides + * (1GB or 4MB boundary violations). All entries in p2m_identity are set to + * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, + * no other fancy value). + * + * On lookup we spot that the entry points to p2m_identity and return the + * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. + * If the entry points to an allocated page, we just proceed as before and + * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in + * appropriate functions (pfn_to_mfn). + * + * The reason for having the IDENTITY_FRAME_BIT instead of just returning the + * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a + * non-identity pfn. To protect ourselves against this, we elect to set (and get) the + * IDENTITY_FRAME_BIT on all identity mapped PFNs.
 + * + * This simplistic diagram is used to explain the more subtle piece of code. + * There is also a diagram of the P2M at the end that can help. + * Imagine your E820 looking like so: + * + * 1GB 2GB + * /-------------------+---------\/----\ /----------\ /---+-----\ + * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | + * \-------------------+---------/\----/ \----------/ \---+-----/ + * ^- 1029MB ^- 2001MB + * + * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), + * 2048MB = 524288 (0x80000)] + * + * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB + * is actually not present (would have to kick the balloon driver to put it in). + * + * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: + * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start + * of the PFN and the end PFN (263424 and 512256 respectively). The first step + * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page + * covers 512^2 of page estate (1GB) and in case the start or end PFN is not + * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn + * to end pfn. We reserve_brk top leaf pages if they are missing (means they + * point to p2m_mid_missing). + * + * With the E820 example above, 263424 is not 1GB aligned so we allocate a + * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. + * Each entry in the allocated page is "missing" (points to p2m_missing). + * + * Next stage is to determine if we need to do a more granular boundary check + * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. + * We check if the start pfn and end pfn violate that boundary check, and if + * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer + * granularity of setting which PFNs are missing and which ones are identity. + * In our example 263424 and 512256 both fail the check so we reserve_brk two + * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" + * values) and assign them to p2m[1][2] and p2m[1][488] respectively. + * + * At this point we would at minimum reserve_brk one page, but could be up to + * three. Each call to set_phys_range_identity has at maximum a three page + * cost. If we were to query the P2M at this stage, all those entries from + * start PFN through end PFN (so 1029MB -> 2001MB) would return + * INVALID_P2M_ENTRY ("missing"). + * + * The next step is to walk from the start pfn to the end pfn setting + * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. + * If we find that the middle leaf is pointing to p2m_missing we can swap it + * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this + * point we do not need to worry about boundary alignment (so no need to + * reserve_brk a middle page, figure out which PFNs are "missing" and which + * ones are identity), as that has been done earlier. If we find that the + * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference + * that page (which covers 512 PFNs) and set the appropriate PFN with + * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we + * set from p2m[1][2][256->511] and p2m[1][488][0->256] with + * IDENTITY_FRAME_BIT set. + * + * All other regions that are void (or not filled) either point to p2m_missing + * (considered missing) or have the default value of INVALID_P2M_ENTRY (also + * considered missing).
In our case, p2m[1][2][0->255] and p2m[1][488][257->511] + * contain the INVALID_P2M_ENTRY value and are considered "missing." + * + * This is what the p2m ends up looking (for the E820 above) with this + * fabulous drawing: + * + * p2m /--------------\ + * /-----\ | &mfn_list[0],| /-----------------\ + * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | + * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | + * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | + * |-----| \ | [p2m_identity]+\\ | .... | + * | 2 |--\ \-------------------->| ... | \\ \----------------/ + * |-----| \ \---------------/ \\ + * | 3 |\ \ \\ p2m_identity + * |-----| \ \-------------------->/---------------\ /-----------------\ + * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | + * \-----/ / | [p2m_identity]+-->| ..., ~0 | + * / /---------------\ | .... | \-----------------/ + * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | + * / | IDENTITY[@256]|<----/ \---------------/ + * / | ~0, ~0, .... | + * | \---------------/ + * | + * p2m_missing p2m_missing + * /------------------\ /------------\ + * | [p2m_mid_missing]+---->| ~0, ~0, ~0 | + * | [p2m_mid_missing]+---->| ..., ~0 | + * \------------------/ \------------/ + * + * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT) */ #include <linux/init.h> @@ -30,6 +153,7 @@ #include <linux/list.h> #include <linux/hash.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <asm/cache.h> #include <asm/setup.h> @@ -59,9 +183,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); + RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); +/* We might hit two boundary violations at the start and end, at max each + * boundary violation will require three middle nodes. */ +RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -136,7 +266,7 @@ static void p2m_init(unsigned long *p2m) * - After resume we're called from within stop_machine, but the mfn * tree should alreay be completely allocated. */ -void xen_build_mfn_list_list(void) +void __ref xen_build_mfn_list_list(void) { unsigned long pfn; @@ -221,6 +351,9 @@ void __init xen_build_dynamic_phys_to_machine(void) p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_init(p2m_top); + p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_init(p2m_identity); + /* * The domain builder gives us a pre-constructed p2m array in * mfn_list for all the pages initially given to us, so we just @@ -266,6 +399,14 @@ unsigned long get_phys_to_machine(unsigned long pfn) mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); + /* + * The INVALID_P2M_ENTRY is filled in both p2m_*identity + * and in p2m_*missing, so returning the INVALID_P2M_ENTRY + * would be wrong. 
+ */ + if (p2m_top[topidx][mididx] == p2m_identity) + return IDENTITY_FRAME(pfn); + + return p2m_top[topidx][mididx][idx]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); @@ -335,9 +476,11 @@ static bool alloc_p2m(unsigned long pfn) p2m_top_mfn_p[topidx] = mid_mfn; } - if (p2m_top[topidx][mididx] == p2m_missing) { + if (p2m_top[topidx][mididx] == p2m_identity || + p2m_top[topidx][mididx] == p2m_missing) { /* p2m leaf page is missing */ unsigned long *p2m; + unsigned long *p2m_orig = p2m_top[topidx][mididx]; p2m = alloc_p2m_page(); if (!p2m) @@ -345,7 +488,7 @@ static bool alloc_p2m(unsigned long pfn) p2m_init(p2m); - if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) + if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) free_p2m_page(p2m); else mid_mfn[mididx] = virt_to_mfn(p2m); @@ -354,11 +497,91 @@ static bool alloc_p2m(unsigned long pfn) return true; } +bool __early_alloc_p2m(unsigned long pfn) +{ + unsigned topidx, mididx, idx; + + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + idx = p2m_index(pfn); + + /* Pfff.. No boundary cross-over, let's get out. */ + if (!idx) + return false; + + WARN(p2m_top[topidx][mididx] == p2m_identity, + "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", + topidx, mididx); + + /* + * Could be done by xen_build_dynamic_phys_to_machine.. + */ + if (p2m_top[topidx][mididx] != p2m_missing) + return false; + + /* Boundary cross-over for the edges: */ + if (idx) { + unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); + + p2m_init(p2m); + + p2m_top[topidx][mididx] = p2m; + + } + return idx != 0; +} +unsigned long set_phys_range_identity(unsigned long pfn_s, + unsigned long pfn_e) +{ + unsigned long pfn; + + if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) + return 0; + + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) + return pfn_e - pfn_s; + + if (pfn_s > pfn_e) + return 0; + + for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); + pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); + pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) + { + unsigned topidx = p2m_top_index(pfn); + if (p2m_top[topidx] == p2m_mid_missing) { + unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); + + p2m_mid_init(mid); + + p2m_top[topidx] = mid; + } + } + + __early_alloc_p2m(pfn_s); + __early_alloc_p2m(pfn_e); + + for (pfn = pfn_s; pfn < pfn_e; pfn++) + if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) + break; + + if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), + "Identity mapping failed. We are %ld short of 1-1 mappings!\n", + (pfn_e - pfn_s) - (pfn - pfn_s))) + printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn); + + return pfn - pfn_s; +} + /* Try to install p2m mapping; fail if intermediate bits missing */ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { unsigned topidx, mididx, idx; + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { + BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + return true; + } if (unlikely(pfn >= MAX_P2M_PFN)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; @@ -368,6 +591,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); + /* For sparse holes where the p2m leaf has a real PFN along with + * PCI holes, stick in the PFN as the MFN value. + */ + if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { + if (p2m_top[topidx][mididx] == p2m_identity) + return true; + + /* Swap over from MISSING to IDENTITY if needed.
*/ + if (p2m_top[topidx][mididx] == p2m_missing) { + WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, + p2m_identity) != p2m_missing); + return true; + } + } + if (p2m_top[topidx][mididx] == p2m_missing) return mfn == INVALID_P2M_ENTRY; @@ -378,11 +616,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return true; - } - if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!alloc_p2m(pfn)) return false; @@ -421,7 +654,7 @@ int m2p_add_override(unsigned long mfn, struct page *page) { unsigned long flags; unsigned long pfn; - unsigned long address; + unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; @@ -455,7 +688,7 @@ int m2p_remove_override(struct page *page) unsigned long flags; unsigned long mfn; unsigned long pfn; - unsigned long address; + unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; @@ -520,3 +753,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) return ret; } EXPORT_SYMBOL_GPL(m2p_find_override_pfn); + +#ifdef CONFIG_XEN_DEBUG_FS + +int p2m_dump_show(struct seq_file *m, void *v) +{ + static const char * const level_name[] = { "top", "middle", + "entry", "abnormal" }; + static const char * const type_name[] = { "identity", "missing", + "pfn", "abnormal"}; +#define TYPE_IDENTITY 0 +#define TYPE_MISSING 1 +#define TYPE_PFN 2 +#define TYPE_UNKNOWN 3 + unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; + unsigned int uninitialized_var(prev_level); + unsigned int uninitialized_var(prev_type); + + if (!p2m_top) + return 0; + + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { + unsigned topidx = p2m_top_index(pfn); + unsigned mididx = p2m_mid_index(pfn); + unsigned idx = p2m_index(pfn); + unsigned lvl, type; + + lvl = 4; + type = TYPE_UNKNOWN; + if (p2m_top[topidx] == p2m_mid_missing) { + lvl = 0; type = TYPE_MISSING; + } else if (p2m_top[topidx] == NULL) { + lvl = 0; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx] == NULL) { + lvl = 1; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx] == p2m_identity) { + lvl = 1; type = TYPE_IDENTITY; + } else if (p2m_top[topidx][mididx] == p2m_missing) { + lvl = 1; type = TYPE_MISSING; + } else if (p2m_top[topidx][mididx][idx] == 0) { + lvl = 2; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { + lvl = 2; type = TYPE_IDENTITY; + } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { + lvl = 2; type = TYPE_MISSING; + } else if (p2m_top[topidx][mididx][idx] == pfn) { + lvl = 2; type = TYPE_PFN; + } else if (p2m_top[topidx][mididx][idx] != pfn) { + lvl = 2; type = TYPE_PFN; + } + if (pfn == 0) { + prev_level = lvl; + prev_type = type; + } + if (pfn == MAX_DOMAIN_PAGES-1) { + lvl = 3; + type = TYPE_UNKNOWN; + } + if (prev_type != type) { + seq_printf(m, " [0x%lx->0x%lx] %s\n", + prev_pfn_type, pfn, type_name[prev_type]); + prev_pfn_type = pfn; + prev_type = type; + } + if (prev_level != lvl) { + seq_printf(m, " [0x%lx->0x%lx] level %s\n", + prev_pfn_level, pfn, level_name[prev_level]); + prev_pfn_level = pfn; + prev_level = lvl; + } + } + return 0; +#undef TYPE_IDENTITY +#undef TYPE_MISSING +#undef TYPE_PFN +#undef TYPE_UNKNOWN +} +#endif diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a8a66a50d446..fa0269a99377 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -52,6 
+52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; static __init void xen_add_extra_mem(unsigned long pages) { + unsigned long pfn; + u64 size = (u64)pages * PAGE_SIZE; u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; @@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages) xen_extra_mem_size += size; xen_max_p2m_pfn = PFN_DOWN(extra_start + size); + + for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++) + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } static unsigned long __init xen_release_chunk(phys_addr_t start_addr, @@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", start, end, ret); if (ret == 1) { - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); len++; } } @@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, return released; } +static unsigned long __init xen_set_identity(const struct e820entry *list, + ssize_t map_size) +{ + phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; + phys_addr_t start_pci = last; + const struct e820entry *entry; + unsigned long identity = 0; + int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + phys_addr_t start = entry->addr; + phys_addr_t end = start + entry->size; + + if (start < last) + start = last; + + if (end <= start) + continue; + + /* Skip over the 1MB region. */ + if (last > end) + continue; + + if (entry->type == E820_RAM) { + if (start > start_pci) + identity += set_phys_range_identity( + PFN_UP(start_pci), PFN_DOWN(start)); + + /* Without saving 'last' we would gooble RAM too + * at the end of the loop. */ + last = end; + start_pci = end; + continue; + } + start_pci = min(start, start_pci); + last = end; + } + if (last > start_pci) + identity += set_phys_range_identity( + PFN_UP(start_pci), PFN_DOWN(last)); + return identity; +} /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ char * __init xen_memory_setup(void) { static struct e820entry map[E820MAX] __initdata; + static struct e820entry map_raw[E820MAX] __initdata; unsigned long max_pfn = xen_start_info->nr_pages; unsigned long long mem_end; @@ -151,6 +199,7 @@ char * __init xen_memory_setup(void) struct xen_memory_map memmap; unsigned long extra_pages = 0; unsigned long extra_limit; + unsigned long identity_pages = 0; int i; int op; @@ -176,6 +225,7 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; xen_extra_mem_start = mem_end; for (i = 0; i < memmap.nr_entries; i++) { @@ -194,6 +244,15 @@ char * __init xen_memory_setup(void) end -= delta; extra_pages += PFN_DOWN(delta); + /* + * Set RAM below 4GB that is not for us to be unusable. + * This prevents "System RAM" address space from being + * used as potential resource for I/O address (happens + * when 'allocate_resource' is called). + */ + if (delta && + (xen_initial_domain() && end < 0x100000000ULL)) + e820_add_region(end, delta, E820_UNUSABLE); } if (map[i].size > 0 && end > xen_extra_mem_start) @@ -251,6 +310,13 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(extra_pages); + /* + * Set P2M for all non-RAM pages and E820 gaps to be identity + * type PFNs. We supply it with the non-sanitized version + * of the E820. 
+ */ + identity_pages = xen_set_identity(map_raw, memmap.nr_entries); + printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); return "Xen"; } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 72a4c7959045..30612441ed99 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -509,3 +509,41 @@ void __init xen_smp_init(void) xen_fill_possible_map(); xen_init_spinlocks(); } + +static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) +{ + native_smp_prepare_cpus(max_cpus); + WARN_ON(xen_smp_intr_init(0)); + + if (!xen_have_vector_callback) + return; + xen_init_lock_cpu(0); + xen_init_spinlocks(); +} + +static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) +{ + int rc; + rc = native_cpu_up(cpu); + WARN_ON (xen_smp_intr_init(cpu)); + return rc; +} + +static void xen_hvm_cpu_die(unsigned int cpu) +{ + unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); + unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); + unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); + unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); + native_cpu_die(cpu); +} + +void __init xen_hvm_smp_init(void) +{ + smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; + smp_ops.smp_send_reschedule = xen_smp_send_reschedule; + smp_ops.cpu_up = xen_hvm_cpu_up; + smp_ops.cpu_die = xen_hvm_cpu_die; + smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; + smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; +} diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 9bbd63a129b5..45329c8c226e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -12,7 +12,7 @@ #include "xen-ops.h" #include "mmu.h" -void xen_pre_suspend(void) +void xen_arch_pre_suspend(void) { xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = @@ -26,8 +26,9 @@ void xen_pre_suspend(void) BUG(); } -void xen_hvm_post_suspend(int suspend_cancelled) +void xen_arch_hvm_post_suspend(int suspend_cancelled) { +#ifdef CONFIG_XEN_PVHVM int cpu; xen_hvm_init_shared_info(); xen_callback_vector(); @@ -37,9 +38,10 @@ void xen_hvm_post_suspend(int suspend_cancelled) xen_setup_runstate_info(cpu); } } +#endif } -void xen_post_suspend(int suspend_cancelled) +void xen_arch_post_suspend(int suspend_cancelled) { xen_build_mfn_list_list(); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 067759e3d6a5..2e2d370a47b1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -397,7 +397,9 @@ void xen_setup_timer(int cpu) name = "<timer kasprintf failed>"; irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, + IRQF_DISABLED|IRQF_PERCPU| + IRQF_NOBALANCING|IRQF_TIMER| + IRQF_FORCE_RESUME, name, NULL); evt = &per_cpu(xen_clock_events, cpu); diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 1a5ff24e29c0..aaa7291c9259 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -28,9 +28,9 @@ ENTRY(startup_xen) __FINIT .pushsection .text - .align PAGE_SIZE_asm + .align PAGE_SIZE ENTRY(hypercall_page) - .skip PAGE_SIZE_asm + .skip PAGE_SIZE .popsection ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9d41bf985757..3112f55638c4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -64,10 +64,12 @@ void xen_setup_vcpu_info_placement(void); #ifdef CONFIG_SMP void xen_smp_init(void); +void __init xen_hvm_smp_init(void); extern 
cpumask_var_t xen_cpu_initialized_map; #else static inline void xen_smp_init(void) {} +static inline void xen_hvm_smp_init(void) {} #endif #ifdef CONFIG_PARAVIRT_SPINLOCKS diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/asm/ioctls.h index ccf1800f0b0c..fd1d1369a407 100644 --- a/arch/xtensa/include/asm/ioctls.h +++ b/arch/xtensa/include/asm/ioctls.h @@ -100,6 +100,7 @@ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP _IO('T', 0x37) #define TIOCSERCONFIG _IO('T', 83) #define TIOCSERGWILD _IOR('T', 84, int) diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h index e39edf5c86f2..249619e7e7f2 100644 --- a/arch/xtensa/include/asm/rwsem.h +++ b/arch/xtensa/include/asm/rwsem.h @@ -17,44 +17,12 @@ #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." #endif -#include <linux/list.h> -#include <linux/spinlock.h> -#include <asm/atomic.h> -#include <asm/system.h> - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -}; - -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} /* * lock for reading @@ -160,9 +128,4 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* _XTENSA_RWSEM_H */ diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 19df764f6399..f3e5eb43f71c 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -96,16 +96,12 @@ again: update_process_times(user_mode(get_irq_regs())); #endif - write_seqlock(&xtime_lock); - - do_timer(1); /* Linux handler in kernel/timer.c */ + xtime_update(1); /* Linux handler in kernel/time/timekeeping */ /* Note that writing CCOMPARE clears the interrupt. */ next += CCOUNT_PER_JIFFY; set_linux_timer(next); - - write_sequnlock(&xtime_lock); } /* Allow platform to do something useful (Wdog). */ diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 9b526154c9ba..a2820065927e 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -155,7 +155,7 @@ SECTIONS INIT_RAM_FS } - PERCPU(PAGE_SIZE) + PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE) /* We need this dummy segment here */ |
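
For readers following the Xen identity-mapping changes above (arch/x86/xen/p2m.c and arch/x86/xen/setup.c) outside the kernel tree: the patch marks non-RAM regions and E820 gaps as 1-1 ("identity") P2M entries, and xen_set_identity() decides which PFN ranges those are by walking the raw E820 map. The stand-alone C program below is a minimal sketch of the shape of that walk, not the kernel code itself: the toy e820 table, PAGE_SHIFT, the stubbed set_phys_range_identity(), and the assumption that IDENTITY_FRAME_BIT is the second-highest bit of an unsigned long are illustrative placeholders, and the dom0-style simplifications (starting at address 0, no explicit low-1MB handling) are deliberate.

/*
 * Minimal user-space sketch of the E820 walk performed by
 * xen_set_identity() in this series. Every gap and every non-RAM
 * region between RAM entries becomes a candidate 1-1 (identity) range.
 * All definitions here are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PFN_UP(x)       (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

/* Assumed encoding: second-highest bit tags a stored MFN as identity. */
#define IDENTITY_FRAME_BIT  (1UL << (sizeof(unsigned long) * 8 - 2))
#define IDENTITY_FRAME(pfn) ((pfn) | IDENTITY_FRAME_BIT)

enum { E820_RAM = 1, E820_RESERVED = 2 };

struct e820entry {
        uint64_t addr;
        uint64_t size;
        uint32_t type;
};

/* Stub: the kernel would stamp IDENTITY_FRAME(pfn) into the p2m here. */
static unsigned long set_phys_range_identity(unsigned long pfn_s,
                                             unsigned long pfn_e)
{
        if (pfn_s >= pfn_e)
                return 0;
        printf("identity: pfn 0x%lx..0x%lx (0x%lx stored as 0x%lx)\n",
               pfn_s, pfn_e, pfn_s, IDENTITY_FRAME(pfn_s));
        return pfn_e - pfn_s;
}

/* Same shape as the xen_set_identity() loop: accumulate the span of
 * non-RAM/gap addresses between RAM entries, then mark it 1-1. */
static unsigned long walk_e820(const struct e820entry *list, int nr)
{
        uint64_t last = 0, start_pci = 0;
        unsigned long identity = 0;
        int i;

        for (i = 0; i < nr; i++) {
                uint64_t start = list[i].addr;
                uint64_t end = start + list[i].size;

                if (start < last)
                        start = last;
                if (end <= start)
                        continue;

                if (list[i].type == E820_RAM) {
                        if (start > start_pci)
                                identity += set_phys_range_identity(
                                        PFN_UP(start_pci), PFN_DOWN(start));
                        last = end;
                        start_pci = end;
                        continue;
                }
                start_pci = start < start_pci ? start : start_pci;
                last = end;
        }
        if (last > start_pci)
                identity += set_phys_range_identity(PFN_UP(start_pci),
                                                    PFN_DOWN(last));
        return identity;
}

int main(void)
{
        /* Toy map: low RAM, a reserved MMIO hole, then RAM above it. */
        struct e820entry map[] = {
                { 0x00000000, 0x0009f000, E820_RAM },
                { 0x0009f000, 0x00061000, E820_RESERVED },
                { 0x00100000, 0x3ff00000, E820_RAM },
        };

        printf("%lu page(s) set to 1-1 mapping\n",
               walk_e820(map, (int)(sizeof(map) / sizeof(map[0]))));
        return 0;
}

Built with any C compiler (e.g. cc sketch.c), this reports the single reserved hole between 0x9f000 and 0x100000 as 97 identity pages, the same kind of bookkeeping the patch prints via "Set %ld page(s) to 1-1 mapping." in xen_memory_setup().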