Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r-- | arch/s390/include/asm/airq.h | 67
-rw-r--r-- | arch/s390/include/asm/bitops.h | 14
-rw-r--r-- | arch/s390/include/asm/cio.h | 1
-rw-r--r-- | arch/s390/include/asm/hardirq.h | 5
-rw-r--r-- | arch/s390/include/asm/hugetlb.h | 135
-rw-r--r-- | arch/s390/include/asm/hw_irq.h | 17
-rw-r--r-- | arch/s390/include/asm/irq.h | 35
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 3
-rw-r--r-- | arch/s390/include/asm/page.h | 19
-rw-r--r-- | arch/s390/include/asm/pci.h | 54
-rw-r--r-- | arch/s390/include/asm/pci_insn.h | 12
-rw-r--r-- | arch/s390/include/asm/pci_io.h | 10
-rw-r--r-- | arch/s390/include/asm/pgtable.h | 637
-rw-r--r-- | arch/s390/include/asm/processor.h | 10
-rw-r--r-- | arch/s390/include/asm/serial.h | 6
-rw-r--r-- | arch/s390/include/asm/switch_to.h | 13
-rw-r--r-- | arch/s390/include/asm/tlb.h | 11
-rw-r--r-- | arch/s390/include/asm/tlbflush.h | 6
18 files changed, 545 insertions, 510 deletions
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h index 4066cee0c2d2..4bbb5957ed1b 100644 --- a/arch/s390/include/asm/airq.h +++ b/arch/s390/include/asm/airq.h @@ -9,6 +9,8 @@ #ifndef _ASM_S390_AIRQ_H #define _ASM_S390_AIRQ_H +#include <linux/bit_spinlock.h> + struct airq_struct { struct hlist_node list; /* Handler queueing. */ void (*handler)(struct airq_struct *); /* Thin-interrupt handler */ @@ -23,4 +25,69 @@ struct airq_struct { int register_adapter_interrupt(struct airq_struct *airq); void unregister_adapter_interrupt(struct airq_struct *airq); +/* Adapter interrupt bit vector */ +struct airq_iv { + unsigned long *vector; /* Adapter interrupt bit vector */ + unsigned long *avail; /* Allocation bit mask for the bit vector */ + unsigned long *bitlock; /* Lock bit mask for the bit vector */ + unsigned long *ptr; /* Pointer associated with each bit */ + unsigned int *data; /* 32 bit value associated with each bit */ + unsigned long bits; /* Number of bits in the vector */ + unsigned long end; /* Number of highest allocated bit + 1 */ + spinlock_t lock; /* Lock to protect alloc & free */ +}; + +#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */ +#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */ +#define AIRQ_IV_PTR 4 /* Allocate the ptr array */ +#define AIRQ_IV_DATA 8 /* Allocate the data array */ + +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags); +void airq_iv_release(struct airq_iv *iv); +unsigned long airq_iv_alloc_bit(struct airq_iv *iv); +void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit); +unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, + unsigned long end); + +static inline unsigned long airq_iv_end(struct airq_iv *iv) +{ + return iv->end; +} + +static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit) +{ + const unsigned long be_to_le = BITS_PER_LONG - 1; + bit_spin_lock(bit ^ be_to_le, iv->bitlock); +} + +static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit) +{ + const unsigned long be_to_le = BITS_PER_LONG - 1; + bit_spin_unlock(bit ^ be_to_le, iv->bitlock); +} + +static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit, + unsigned int data) +{ + iv->data[bit] = data; +} + +static inline unsigned int airq_iv_get_data(struct airq_iv *iv, + unsigned long bit) +{ + return iv->data[bit]; +} + +static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit, + unsigned long ptr) +{ + iv->ptr[bit] = ptr; +} + +static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv, + unsigned long bit) +{ + return iv->ptr[bit]; +} + #endif /* _ASM_S390_AIRQ_H */ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 4d8604e311f3..10135a38673c 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " oc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); + : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc"); } static inline void @@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " nc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); + : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc"); } static inline void @@ -271,7 +271,7 @@ static inline void 
__change_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " xc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); + : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc"); } static inline void @@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) ch = *(unsigned char *) addr; asm volatile( " oc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) + : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc", "memory"); return (ch >> (nr & 7)) & 1; } @@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) ch = *(unsigned char *) addr; asm volatile( " nc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) + : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc", "memory"); return (ch >> (nr & 7)) & 1; } @@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) ch = *(unsigned char *) addr; asm volatile( " xc %O0(1,%R0),%1" - : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) + : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc", "memory"); return (ch >> (nr & 7)) & 1; } @@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr, size -= offset; p = addr + offset / BITS_PER_LONG; if (bit) { - set = __flo_word(0, *p & (~0UL << bit)); + set = __flo_word(0, *p & (~0UL >> bit)); if (set >= size) return size + offset; if (set < BITS_PER_LONG) diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index ffb898961c8d..d42625053c37 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -296,6 +296,7 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, return 0; } +void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 0c82ba86e997..a908d2941c5d 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -20,4 +20,9 @@ #define HARDIRQ_BITS 8 +static inline void ack_bad_irq(unsigned int irq) +{ + printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); +} + #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index bd90359d6d22..11eae5f55b70 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -17,6 +17,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +pte_t huge_ptep_get(pte_t *ptep); +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); /* * If the arch doesn't supply something else, assume that hugepage @@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file, int arch_prepare_hugepage(struct page *page); void arch_release_hugepage(struct page *page); -static inline pte_t huge_pte_wrprotect(pte_t pte) +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { - pte_val(pte) |= _PAGE_RO; - return pte; + pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY; } -static inline int huge_pte_none(pte_t pte) +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) { - return (pte_val(pte) & _SEGMENT_ENTRY_INV) && - !(pte_val(pte) & _SEGMENT_ENTRY_RO); + huge_ptep_get_and_clear(vma->vm_mm, address, ptep); } -static inline pte_t 
huge_ptep_get(pte_t *ptep) +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) { - pte_t pte = *ptep; - unsigned long mask; - - if (!MACHINE_HAS_HPAGE) { - ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN); - if (ptep) { - mask = pte_val(pte) & - (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); - pte = pte_mkhuge(*ptep); - pte_val(pte) |= mask; - } + int changed = !pte_same(huge_ptep_get(ptep), pte); + if (changed) { + huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } - return pte; + return changed; } -static inline void __pmd_csp(pmd_t *pmdp) +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { - register unsigned long reg2 asm("2") = pmd_val(*pmdp); - register unsigned long reg3 asm("3") = pmd_val(*pmdp) | - _SEGMENT_ENTRY_INV; - register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; - - asm volatile( - " csp %1,%3" - : "=m" (*pmdp) - : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); + pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); + set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } -static inline void huge_ptep_invalidate(struct mm_struct *mm, - unsigned long address, pte_t *ptep) -{ - pmd_t *pmdp = (pmd_t *) ptep; - - if (MACHINE_HAS_IDTE) - __pmd_idte(address, pmdp); - else - __pmd_csp(pmdp); - pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY; -} - -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - pte_t pte = huge_ptep_get(ptep); - - huge_ptep_invalidate(mm, addr, ptep); - return pte; -} - -#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ -({ \ - int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \ - if (__changed) { \ - huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \ - set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ - } \ - __changed; \ -}) - -#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ -({ \ - pte_t __pte = huge_ptep_get(__ptep); \ - if (huge_pte_write(__pte)) { \ - huge_ptep_invalidate(__mm, __addr, __ptep); \ - set_huge_pte_at(__mm, __addr, __ptep, \ - huge_pte_wrprotect(__pte)); \ - } \ -}) - -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) { - huge_ptep_invalidate(vma->vm_mm, address, ptep); + return mk_pte(page, pgprot); } -static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +static inline int huge_pte_none(pte_t pte) { - pte_t pte; - pmd_t pmd; - - pmd = mk_pmd_phys(page_to_phys(page), pgprot); - pte_val(pte) = pmd_val(pmd); - return pte; + return pte_none(pte); } static inline int huge_pte_write(pte_t pte) { - pmd_t pmd; - - pmd_val(pmd) = pte_val(pte); - return pmd_write(pmd); + return pte_write(pte); } static inline int huge_pte_dirty(pte_t pte) { - /* No dirty bit in the segment table entry. */ - return 0; + return pte_dirty(pte); } static inline pte_t huge_pte_mkwrite(pte_t pte) { - pmd_t pmd; - - pmd_val(pmd) = pte_val(pte); - pte_val(pte) = pmd_val(pmd_mkwrite(pmd)); - return pte; + return pte_mkwrite(pte); } static inline pte_t huge_pte_mkdirty(pte_t pte) { - /* No dirty bit in the segment table entry. 
*/ - return pte; + return pte_mkdirty(pte); } -static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +static inline pte_t huge_pte_wrprotect(pte_t pte) { - pmd_t pmd; - - pmd_val(pmd) = pte_val(pte); - pte_val(pte) = pmd_val(pmd_modify(pmd, newprot)); - return pte; + return pte_wrprotect(pte); } -static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) { - pmd_clear((pmd_t *) ptep); + return pte_modify(pte, newprot); } #endif /* _ASM_S390_HUGETLB_H */ diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h index 7e3d2586c1ff..ee96a8b697f9 100644 --- a/arch/s390/include/asm/hw_irq.h +++ b/arch/s390/include/asm/hw_irq.h @@ -4,19 +4,8 @@ #include <linux/msi.h> #include <linux/pci.h> -static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) -{ - return __irq_get_msi_desc(irq); -} - -/* Must be called with msi map lock held */ -static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi) -{ - if (!msi) - return -EINVAL; - - msi->irq = irq; - return 0; -} +void __init init_airq_interrupts(void); +void __init init_cio_interrupts(void); +void __init init_ext_interrupts(void); #endif diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 87c17bfb2968..1eaa3625803c 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -1,17 +1,28 @@ #ifndef _ASM_IRQ_H #define _ASM_IRQ_H +#define EXT_INTERRUPT 1 +#define IO_INTERRUPT 2 +#define THIN_INTERRUPT 3 + +#define NR_IRQS_BASE 4 + +#ifdef CONFIG_PCI_NR_MSI +# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI) +#else +# define NR_IRQS NR_IRQS_BASE +#endif + +/* This number is used when no interrupt has been assigned */ +#define NO_IRQ 0 + +#ifndef __ASSEMBLY__ + #include <linux/hardirq.h> #include <linux/percpu.h> #include <linux/cache.h> #include <linux/types.h> -enum interruption_main_class { - EXTERNAL_INTERRUPT, - IO_INTERRUPT, - NR_IRQS -}; - enum interruption_class { IRQEXT_CLK, IRQEXT_EXC, @@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void); void measurement_alert_subclass_register(void); void measurement_alert_subclass_unregister(void); -#ifdef CONFIG_LOCKDEP -# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) -# define disable_irq_nosync_lockdep_irqsave(irq, flags) \ - disable_irq_nosync(irq) -# define disable_irq_lockdep(irq) disable_irq(irq) -# define enable_irq_lockdep(irq) enable_irq(irq) -# define enable_irq_lockdep_irqrestore(irq, flags) \ - enable_irq(irq) -#endif +#define irq_canonicalize(irq) (irq) + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_IRQ_H */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 084e7755ed9b..7b7fce4e8469 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, WARN_ON(atomic_read(&prev->context.attach_count) < 0); atomic_inc(&next->context.attach_count); /* Check for TLBs not flushed yet */ - if (next->context.flush_mm) - __tlb_flush_mm(next); + __tlb_flush_mm_lazy(next); } #define enter_lazy_tlb(mm,tsk) do { } while (0) diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 5d64fb7619cc..1e51f2915b2e 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -32,16 +32,6 @@ void storage_key_init_range(unsigned long start, unsigned long end); -static inline unsigned long pfmf(unsigned 
long function, unsigned long address) -{ - asm volatile( - " .insn rre,0xb9af0000,%[function],%[address]" - : [address] "+a" (address) - : [function] "d" (function) - : "memory"); - return address; -} - static inline void clear_page(void *page) { register unsigned long reg1 asm ("1") = 0; @@ -150,15 +140,6 @@ static inline int page_reset_referenced(unsigned long addr) #define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ -/* - * Test and clear referenced bit in storage key. - */ -#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG -static inline int page_test_and_clear_young(unsigned long pfn) -{ - return page_reset_referenced(pfn << PAGE_SHIFT); -} - struct page; void arch_free_page(struct page *page, int order); void arch_alloc_page(struct page *page, int order); diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6e577ba0e5da..c290f13d1c47 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -6,6 +6,7 @@ /* must be set before including pci_clp.h */ #define PCI_BAR_COUNT 6 +#include <linux/pci.h> #include <asm-generic/pci.h> #include <asm-generic/pci-dma-compat.h> #include <asm/pci_clp.h> @@ -53,14 +54,9 @@ struct zpci_fmb { atomic64_t unmapped_pages; } __packed __aligned(16); -struct msi_map { - unsigned long irq; - struct msi_desc *msi; - struct hlist_node msi_chain; -}; - -#define ZPCI_NR_MSI_VECS 64 -#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1) +#define ZPCI_MSI_VEC_BITS 11 +#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS) +#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1) enum zpci_state { ZPCI_FN_STATE_RESERVED, @@ -91,8 +87,7 @@ struct zpci_dev { /* IRQ stuff */ u64 msi_addr; /* MSI address */ - struct zdev_irq_map *irq_map; - struct msi_map *msi_map[ZPCI_NR_MSI_VECS]; + struct airq_iv *aibv; /* adapter interrupt bit vector */ unsigned int aisb; /* number of the summary bit */ /* DMA stuff */ @@ -122,11 +117,6 @@ struct zpci_dev { struct dentry *debugfs_perf; }; -struct pci_hp_callback_ops { - int (*create_slot) (struct zpci_dev *zdev); - void (*remove_slot) (struct zpci_dev *zdev); -}; - static inline bool zdev_enabled(struct zpci_dev *zdev) { return (zdev->fh & (1UL << 31)) ? 
true : false; @@ -146,32 +136,38 @@ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); int zpci_unregister_ioat(struct zpci_dev *, u8); /* CLP */ -int clp_find_pci_devices(void); +int clp_scan_pci_devices(void); +int clp_rescan_pci_devices(void); +int clp_rescan_pci_devices_simple(void); int clp_add_pci_device(u32, u32, int); int clp_enable_fh(struct zpci_dev *, u8); int clp_disable_fh(struct zpci_dev *); -/* MSI */ -struct msi_desc *__irq_get_msi_desc(unsigned int); -int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32); -int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int); -void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *); -int zpci_msihash_init(void); -void zpci_msihash_exit(void); - #ifdef CONFIG_PCI /* Error handling and recovery */ void zpci_event_error(void *); void zpci_event_availability(void *); +void zpci_rescan(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} +static inline void zpci_rescan(void) {} #endif /* CONFIG_PCI */ +#ifdef CONFIG_HOTPLUG_PCI_S390 +int zpci_init_slot(struct zpci_dev *); +void zpci_exit_slot(struct zpci_dev *); +#else /* CONFIG_HOTPLUG_PCI_S390 */ +static inline int zpci_init_slot(struct zpci_dev *zdev) +{ + return 0; +} +static inline void zpci_exit_slot(struct zpci_dev *zdev) {} +#endif /* CONFIG_HOTPLUG_PCI_S390 */ + /* Helpers */ struct zpci_dev *get_zdev(struct pci_dev *); struct zpci_dev *get_zdev_by_fid(u32); -bool zpci_fid_present(u32); /* sysfs */ int zpci_sysfs_add_device(struct device *); @@ -181,14 +177,6 @@ void zpci_sysfs_remove_device(struct device *); int zpci_dma_init(void); void zpci_dma_exit(void); -/* Hotplug */ -extern struct mutex zpci_list_lock; -extern struct list_head zpci_list; -extern unsigned int s390_pci_probe; - -void zpci_register_hp_ops(struct pci_hp_callback_ops *); -void zpci_deregister_hp_ops(void); - /* FMB */ int zpci_fmb_enable_device(struct zpci_dev *); int zpci_fmb_disable_device(struct zpci_dev *); diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index e6a2bdd4d705..df6eac9f0cb4 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -79,11 +79,11 @@ struct zpci_fib { } __packed; -int s390pci_mod_fc(u64 req, struct zpci_fib *fib); -int s390pci_refresh_trans(u64 fn, u64 addr, u64 range); -int s390pci_load(u64 *data, u64 req, u64 offset); -int s390pci_store(u64 data, u64 req, u64 offset); -int s390pci_store_block(const u64 *data, u64 req, u64 offset); -void set_irq_ctrl(u16 ctl, char *unused, u8 isc); +int zpci_mod_fc(u64 req, struct zpci_fib *fib); +int zpci_refresh_trans(u64 fn, u64 addr, u64 range); +int zpci_load(u64 *data, u64 req, u64 offset); +int zpci_store(u64 data, u64 req, u64 offset); +int zpci_store_block(const u64 *data, u64 req, u64 offset); +void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 83a9caa6ae53..d194d544d694 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ u64 data; \ int rc; \ \ - rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \ + rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \ if (rc) \ data = -1ULL; \ return (RETTYPE) data; \ @@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ u64 data 
= (VALTYPE) val; \ \ - s390pci_store(data, req, ZPCI_OFFSET(addr)); \ + zpci_store(data, req, ZPCI_OFFSET(addr)); \ } zpci_read(8, u64) @@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len val = 0; /* let FW report error */ break; } - return s390pci_store(val, req, offset); + return zpci_store(val, req, offset); } static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) @@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) u64 data; int cc; - cc = s390pci_load(&data, req, offset); + cc = zpci_load(&data, req, offset); if (cc) goto out; @@ -115,7 +115,7 @@ out: static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) { - return s390pci_store_block(data, req, offset); + return zpci_store_block(data, req, offset); } static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 75fb726de91f..9f215b40109e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -217,63 +217,57 @@ extern unsigned long MODULES_END; /* Hardware bits in the page table entry */ #define _PAGE_CO 0x100 /* HW Change-bit override */ -#define _PAGE_RO 0x200 /* HW read-only bit */ +#define _PAGE_PROTECT 0x200 /* HW read-only bit */ #define _PAGE_INVALID 0x400 /* HW invalid bit */ +#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ /* Software bits in the page table entry */ -#define _PAGE_SWT 0x001 /* SW pte type bit t */ -#define _PAGE_SWX 0x002 /* SW pte type bit x */ -#define _PAGE_SWC 0x004 /* SW pte changed bit */ -#define _PAGE_SWR 0x008 /* SW pte referenced bit */ -#define _PAGE_SWW 0x010 /* SW pte write bit */ -#define _PAGE_SPECIAL 0x020 /* SW associated with special page */ +#define _PAGE_PRESENT 0x001 /* SW pte present bit */ +#define _PAGE_TYPE 0x002 /* SW pte type bit */ +#define _PAGE_YOUNG 0x004 /* SW pte young bit */ +#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ +#define _PAGE_READ 0x010 /* SW pte read bit */ +#define _PAGE_WRITE 0x020 /* SW pte write bit */ +#define _PAGE_SPECIAL 0x040 /* SW associated with special page */ #define __HAVE_ARCH_PTE_SPECIAL /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ - _PAGE_SWC | _PAGE_SWR) - -/* Six different types of pages. */ -#define _PAGE_TYPE_EMPTY 0x400 -#define _PAGE_TYPE_NONE 0x401 -#define _PAGE_TYPE_SWAP 0x403 -#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ -#define _PAGE_TYPE_RO 0x200 -#define _PAGE_TYPE_RW 0x000 + _PAGE_DIRTY | _PAGE_YOUNG) /* - * Only four types for huge pages, using the invalid bit and protection bit - * of a segment table entry. - */ -#define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */ -#define _HPAGE_TYPE_NONE 0x220 -#define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */ -#define _HPAGE_TYPE_RW 0x000 - -/* - * PTE type bits are rather complicated. handle_pte_fault uses pte_present, - * pte_none and pte_file to find out the pte type WITHOUT holding the page - * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to - * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs - * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. - * This change is done while holding the lock, but the intermediate step - * of a previously valid pte with the hw invalid bit set can be observed by - * handle_pte_fault. 
That makes it necessary that all valid pte types with - * the hw invalid bit set must be distinguishable from the four pte types - * empty, none, swap and file. + * handle_pte_fault uses pte_present, pte_none and pte_file to find out the + * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit + * is used to distinguish present from not-present ptes. It is changed only + * with the page table lock held. + * + * The following table gives the different possible bit combinations for + * the pte hardware and software bits in the last 12 bits of a pte: * - * irxt ipte irxt - * _PAGE_TYPE_EMPTY 1000 -> 1000 - * _PAGE_TYPE_NONE 1001 -> 1001 - * _PAGE_TYPE_SWAP 1011 -> 1011 - * _PAGE_TYPE_FILE 11?1 -> 11?1 - * _PAGE_TYPE_RO 0100 -> 1100 - * _PAGE_TYPE_RW 0000 -> 1000 + * 842100000000 + * 000084210000 + * 000000008421 + * .IR...wrdytp + * empty .10...000000 + * swap .10...xxxx10 + * file .11...xxxxx0 + * prot-none, clean, old .11...000001 + * prot-none, clean, young .11...000101 + * prot-none, dirty, old .10...001001 + * prot-none, dirty, young .10...001101 + * read-only, clean, old .11...010001 + * read-only, clean, young .01...010101 + * read-only, dirty, old .11...011001 + * read-only, dirty, young .01...011101 + * read-write, clean, old .11...110001 + * read-write, clean, young .01...110101 + * read-write, dirty, old .10...111001 + * read-write, dirty, young .00...111101 * - * pte_none is true for bits combinations 1000, 1010, 1100, 1110 - * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 - * pte_file is true for bits combinations 1101, 1111 - * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. + * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 + * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 + * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600 + * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 */ #ifndef CONFIG_64BIT @@ -286,14 +280,25 @@ extern unsigned long MODULES_END; #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */ #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ -#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ -#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ +#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ +#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ +#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) -#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) +#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) + +/* + * Segment table entry encoding (I = invalid, R = read-only bit): + * ..R...I..... + * prot-none ..1...1..... + * read-only ..1...0..... + * read-write ..0...0..... + * empty ..0...1..... 
+ */ /* Page status table bits for virtualization */ #define PGSTE_ACC_BITS 0xf0000000UL @@ -303,9 +308,7 @@ extern unsigned long MODULES_END; #define PGSTE_HC_BIT 0x00200000UL #define PGSTE_GR_BIT 0x00040000UL #define PGSTE_GC_BIT 0x00020000UL -#define PGSTE_UR_BIT 0x00008000UL -#define PGSTE_UC_BIT 0x00004000UL /* user dirty (migration) */ -#define PGSTE_IN_BIT 0x00002000UL /* IPTE notify bit */ +#define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */ #else /* CONFIG_64BIT */ @@ -324,8 +327,8 @@ extern unsigned long MODULES_END; /* Bits in the region table entry */ #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ -#define _REGION_ENTRY_RO 0x200 /* region protection bit */ -#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ +#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ +#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ @@ -333,29 +336,47 @@ extern unsigned long MODULES_END; #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) -#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV) +#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) -#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV) +#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) -#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) +#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) #define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ #define _REGION3_ENTRY_RO 0x200 /* page protection bit */ #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL +#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ -#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ -#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ +#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ +#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ #define _SEGMENT_ENTRY (0) -#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) +#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ +#define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */ +#define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */ +#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG + +/* + * Segment table entry encoding (R = read-only, I = invalid, y = young bit): + * ..R...I...y. + * prot-none, old ..0...1...1. + * prot-none, young ..1...1...1. + * read-only, old ..1...1...0. + * read-only, young ..1...0...1. + * read-write, old ..0...1...0. + * read-write, young ..0...0...1. 
+ * The segment table origin is used to distinguish empty (origin==0) from + * read-write, old segment table entries (origin!=0) + */ + #define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ -#define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT) /* Set of bits not changed in pmd_modify */ #define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \ @@ -369,9 +390,7 @@ extern unsigned long MODULES_END; #define PGSTE_HC_BIT 0x0020000000000000UL #define PGSTE_GR_BIT 0x0004000000000000UL #define PGSTE_GC_BIT 0x0002000000000000UL -#define PGSTE_UR_BIT 0x0000800000000000UL -#define PGSTE_UC_BIT 0x0000400000000000UL /* user dirty (migration) */ -#define PGSTE_IN_BIT 0x0000200000000000UL /* IPTE notify bit */ +#define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ #endif /* CONFIG_64BIT */ @@ -386,14 +405,18 @@ extern unsigned long MODULES_END; /* * Page protection definitions. */ -#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) -#define PAGE_RO __pgprot(_PAGE_TYPE_RO) -#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW) -#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC) - -#define PAGE_KERNEL PAGE_RWC -#define PAGE_SHARED PAGE_KERNEL -#define PAGE_COPY PAGE_RO +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID) +#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_INVALID | _PAGE_PROTECT) +#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_INVALID | _PAGE_PROTECT) + +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_YOUNG | _PAGE_DIRTY) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_YOUNG | _PAGE_DIRTY) +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ + _PAGE_PROTECT) /* * On s390 the page table entry has an invalid bit and a read-only bit. @@ -402,35 +425,31 @@ extern unsigned long MODULES_END; */ /*xwr*/ #define __P000 PAGE_NONE -#define __P001 PAGE_RO -#define __P010 PAGE_RO -#define __P011 PAGE_RO -#define __P100 PAGE_RO -#define __P101 PAGE_RO -#define __P110 PAGE_RO -#define __P111 PAGE_RO +#define __P001 PAGE_READ +#define __P010 PAGE_READ +#define __P011 PAGE_READ +#define __P100 PAGE_READ +#define __P101 PAGE_READ +#define __P110 PAGE_READ +#define __P111 PAGE_READ #define __S000 PAGE_NONE -#define __S001 PAGE_RO -#define __S010 PAGE_RW -#define __S011 PAGE_RW -#define __S100 PAGE_RO -#define __S101 PAGE_RO -#define __S110 PAGE_RW -#define __S111 PAGE_RW +#define __S001 PAGE_READ +#define __S010 PAGE_WRITE +#define __S011 PAGE_WRITE +#define __S100 PAGE_READ +#define __S101 PAGE_READ +#define __S110 PAGE_WRITE +#define __S111 PAGE_WRITE /* * Segment entry (large page) protection definitions. 
*/ -#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) -#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) -#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) - -static inline int mm_exclusive(struct mm_struct *mm) -{ - return likely(mm == current->active_mm && - atomic_read(&mm->context.attach_count) <= 1); -} +#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ + _SEGMENT_ENTRY_NONE) +#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \ + _SEGMENT_ENTRY_PROTECT) +#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID) static inline int mm_has_pgste(struct mm_struct *mm) { @@ -467,7 +486,7 @@ static inline int pgd_none(pgd_t pgd) { if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) return 0; - return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL; + return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; } static inline int pgd_bad(pgd_t pgd) @@ -478,7 +497,7 @@ static inline int pgd_bad(pgd_t pgd) * invalid for either table entry. */ unsigned long mask = - ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & + ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; return (pgd_val(pgd) & mask) != 0; } @@ -494,7 +513,7 @@ static inline int pud_none(pud_t pud) { if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) return 0; - return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; + return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL; } static inline int pud_large(pud_t pud) @@ -512,7 +531,7 @@ static inline int pud_bad(pud_t pud) * invalid for either table entry. */ unsigned long mask = - ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & + ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; return (pud_val(pud) & mask) != 0; } @@ -521,30 +540,36 @@ static inline int pud_bad(pud_t pud) static inline int pmd_present(pmd_t pmd) { - unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO; - return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE || - !(pmd_val(pmd) & _SEGMENT_ENTRY_INV); + return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; } static inline int pmd_none(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) && - !(pmd_val(pmd) & _SEGMENT_ENTRY_RO); + return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID; } static inline int pmd_large(pmd_t pmd) { #ifdef CONFIG_64BIT - return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE); + return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; #else return 0; #endif } +static inline int pmd_prot_none(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) && + (pmd_val(pmd) & _SEGMENT_ENTRY_NONE); +} + static inline int pmd_bad(pmd_t pmd) { - unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV; - return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; +#ifdef CONFIG_64BIT + if (pmd_large(pmd)) + return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; +#endif + return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; } #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH @@ -563,31 +588,40 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma, #define __HAVE_ARCH_PMD_WRITE static inline int pmd_write(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0; + if (pmd_prot_none(pmd)) + return 0; + return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0; } static inline int pmd_young(pmd_t pmd) { - return 0; + int young = 0; +#ifdef CONFIG_64BIT + if (pmd_prot_none(pmd)) + young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0; + else + young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; +#endif + return young; } -static inline int pte_none(pte_t pte) +static inline int 
pte_present(pte_t pte) { - return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); + /* Bit pattern: (pte & 0x001) == 0x001 */ + return (pte_val(pte) & _PAGE_PRESENT) != 0; } -static inline int pte_present(pte_t pte) +static inline int pte_none(pte_t pte) { - unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; - return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || - (!(pte_val(pte) & _PAGE_INVALID) && - !(pte_val(pte) & _PAGE_SWT)); + /* Bit pattern: pte == 0x400 */ + return pte_val(pte) == _PAGE_INVALID; } static inline int pte_file(pte_t pte) { - unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; - return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; + /* Bit pattern: (pte & 0x601) == 0x600 */ + return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT)) + == (_PAGE_INVALID | _PAGE_PROTECT); } static inline int pte_special(pte_t pte) @@ -634,6 +668,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) #endif } +static inline pgste_t pgste_get(pte_t *ptep) +{ + unsigned long pgste = 0; +#ifdef CONFIG_PGSTE + pgste = *(unsigned long *)(ptep + PTRS_PER_PTE); +#endif + return __pgste(pgste); +} + static inline void pgste_set(pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE @@ -644,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste) static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE - unsigned long address, bits; - unsigned char skey; + unsigned long address, bits, skey; if (pte_val(*ptep) & _PAGE_INVALID) return pgste; address = pte_val(*ptep) & PAGE_MASK; - skey = page_get_storage_key(address); + skey = (unsigned long) page_get_storage_key(address); bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); - /* Clear page changed & referenced bit in the storage key */ - if (bits & _PAGE_CHANGED) + if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) { + /* Transfer dirty + referenced bit to host bits in pgste */ + pgste_val(pgste) |= bits << 52; page_set_storage_key(address, skey ^ bits, 0); - else if (bits) + } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) && + (bits & _PAGE_REFERENCED)) { + /* Transfer referenced bit to host bit in pgste */ + pgste_val(pgste) |= PGSTE_HR_BIT; page_reset_referenced(address); + } /* Transfer page changed & referenced bit to guest bits in pgste */ pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ - /* Get host changed & referenced bits from pgste */ - bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52; - /* Transfer page changed & referenced bit to kvm user bits */ - pgste_val(pgste) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */ - /* Clear relevant host bits in pgste. 
*/ - pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT); - pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); /* Copy page access key and fetch protection bit to pgste */ - pgste_val(pgste) |= - (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; - /* Transfer referenced bit to pte */ - pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1; + pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); + pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; #endif return pgste; @@ -679,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE - int young; - if (pte_val(*ptep) & _PAGE_INVALID) return pgste; /* Get referenced bit from storage key */ - young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); - if (young) - pgste_val(pgste) |= PGSTE_GR_BIT; - /* Get host referenced bit from pgste */ - if (pgste_val(pgste) & PGSTE_HR_BIT) { - pgste_val(pgste) &= ~PGSTE_HR_BIT; - young = 1; - } - /* Transfer referenced bit to kvm user bits and pte */ - if (young) { - pgste_val(pgste) |= PGSTE_UR_BIT; - pte_val(*ptep) |= _PAGE_SWR; - } + if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK)) + pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT; #endif return pgste; } @@ -723,13 +748,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) static inline void pgste_set_pte(pte_t *ptep, pte_t entry) { - if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) { + if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) { /* * Without enhanced suppression-on-protection force * the dirty bit on for all writable ptes. */ - pte_val(entry) |= _PAGE_SWC; - pte_val(entry) &= ~_PAGE_RO; + pte_val(entry) |= _PAGE_DIRTY; + pte_val(entry) &= ~_PAGE_PROTECT; } *ptep = entry; } @@ -841,21 +866,17 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, */ static inline int pte_write(pte_t pte) { - return (pte_val(pte) & _PAGE_SWW) != 0; + return (pte_val(pte) & _PAGE_WRITE) != 0; } static inline int pte_dirty(pte_t pte) { - return (pte_val(pte) & _PAGE_SWC) != 0; + return (pte_val(pte) & _PAGE_DIRTY) != 0; } static inline int pte_young(pte_t pte) { -#ifdef CONFIG_PGSTE - if (pte_val(pte) & _PAGE_SWR) - return 1; -#endif - return 0; + return (pte_val(pte) & _PAGE_YOUNG) != 0; } /* @@ -880,12 +901,12 @@ static inline void pud_clear(pud_t *pud) static inline void pmd_clear(pmd_t *pmdp) { - pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; + pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID; } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_val(*ptep) = _PAGE_TYPE_EMPTY; + pte_val(*ptep) = _PAGE_INVALID; } /* @@ -896,55 +917,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) &= _PAGE_CHG_MASK; pte_val(pte) |= pgprot_val(newprot); - if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW)) - pte_val(pte) &= ~_PAGE_RO; + /* + * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the + * invalid bit set, clear it again for readable, young pages + */ + if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) + pte_val(pte) &= ~_PAGE_INVALID; + /* + * newprot for PAGE_READ and PAGE_WRITE has the page protection + * bit set, clear it again for writable, dirty pages + */ + if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) + pte_val(pte) &= ~_PAGE_PROTECT; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { - pte_val(pte) &= ~_PAGE_SWW; - /* Do not clobber 
_PAGE_TYPE_NONE pages! */ - if (!(pte_val(pte) & _PAGE_INVALID)) - pte_val(pte) |= _PAGE_RO; + pte_val(pte) &= ~_PAGE_WRITE; + pte_val(pte) |= _PAGE_PROTECT; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) |= _PAGE_SWW; - if (pte_val(pte) & _PAGE_SWC) - pte_val(pte) &= ~_PAGE_RO; + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_DIRTY) + pte_val(pte) &= ~_PAGE_PROTECT; return pte; } static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~_PAGE_SWC; - /* Do not clobber _PAGE_TYPE_NONE pages! */ - if (!(pte_val(pte) & _PAGE_INVALID)) - pte_val(pte) |= _PAGE_RO; + pte_val(pte) &= ~_PAGE_DIRTY; + pte_val(pte) |= _PAGE_PROTECT; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_SWC; - if (pte_val(pte) & _PAGE_SWW) - pte_val(pte) &= ~_PAGE_RO; + pte_val(pte) |= _PAGE_DIRTY; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) &= ~_PAGE_PROTECT; return pte; } static inline pte_t pte_mkold(pte_t pte) { -#ifdef CONFIG_PGSTE - pte_val(pte) &= ~_PAGE_SWR; -#endif + pte_val(pte) &= ~_PAGE_YOUNG; + pte_val(pte) |= _PAGE_INVALID; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { + pte_val(pte) |= _PAGE_YOUNG; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) &= ~_PAGE_INVALID; return pte; } @@ -957,7 +986,7 @@ static inline pte_t pte_mkspecial(pte_t pte) #ifdef CONFIG_HUGETLB_PAGE static inline pte_t pte_mkhuge(pte_t pte) { - pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); + pte_val(pte) |= _PAGE_LARGE; return pte; } #endif @@ -974,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); pgste = pgste_update_all(ptep, pgste); - dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); - pgste_val(pgste) &= ~PGSTE_UC_BIT; + dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT); + pgste_val(pgste) &= ~PGSTE_HC_BIT; pgste_set_unlock(ptep, pgste); return dirty; } @@ -994,59 +1023,75 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); pgste = pgste_update_young(ptep, pgste); - young = !!(pgste_val(pgste) & PGSTE_UR_BIT); - pgste_val(pgste) &= ~PGSTE_UR_BIT; + young = !!(pgste_val(pgste) & PGSTE_HR_BIT); + pgste_val(pgste) &= ~PGSTE_HR_BIT; pgste_set_unlock(ptep, pgste); } return young; } +static inline void __ptep_ipte(unsigned long address, pte_t *ptep) +{ + if (!(pte_val(*ptep) & _PAGE_INVALID)) { +#ifndef CONFIG_64BIT + /* pto must point to the start of the segment table */ + pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); +#else + /* ipte in zarch mode can do the math */ + pte_t *pto = ptep; +#endif + asm volatile( + " ipte %2,%3" + : "=m" (*ptep) : "m" (*ptep), + "a" (pto), "a" (address)); + } +} + +static inline void ptep_flush_lazy(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + int active = (mm == current->active_mm) ? 
1 : 0; + + if (atomic_read(&mm->context.attach_count) > active) + __ptep_ipte(address, ptep); + else + mm->context.flush_mm = 1; +} + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pgste_t pgste; pte_t pte; + int young; if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_update_young(ptep, pgste); - pte = *ptep; - *ptep = pte_mkold(pte); - pgste_set_unlock(ptep, pgste); - return pte_young(pte); + pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); } - return 0; + + pte = *ptep; + __ptep_ipte(addr, ptep); + young = pte_young(pte); + pte = pte_mkold(pte); + + if (mm_has_pgste(vma->vm_mm)) { + pgste_set_pte(ptep, pte); + pgste_set_unlock(ptep, pgste); + } else + *ptep = pte; + + return young; } #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH static inline int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - /* No need to flush TLB - * On s390 reference bits are in storage key and never in TLB - * With virtualization we handle the reference bit, without we - * we can simply return */ return ptep_test_and_clear_young(vma, address, ptep); } -static inline void __ptep_ipte(unsigned long address, pte_t *ptep) -{ - if (!(pte_val(*ptep) & _PAGE_INVALID)) { -#ifndef CONFIG_64BIT - /* pto must point to the start of the segment table */ - pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); -#else - /* ipte in zarch mode can do the math */ - pte_t *pto = ptep; -#endif - asm volatile( - " ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (pto), "a" (address)); - } -} - /* * This is hard to understand. ptep_get_and_clear and ptep_clear_flush * both clear the TLB for the unmapped pte. 
The reason is that @@ -1067,16 +1112,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, pgste_t pgste; pte_t pte; - mm->context.flush_mm = 1; if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); pgste = pgste_ipte_notify(mm, address, ptep, pgste); } pte = *ptep; - if (!mm_exclusive(mm)) - __ptep_ipte(address, ptep); - pte_val(*ptep) = _PAGE_TYPE_EMPTY; + ptep_flush_lazy(mm, address, ptep); + pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(mm)) { pgste = pgste_update_all(&pte, pgste); @@ -1093,15 +1136,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, pgste_t pgste; pte_t pte; - mm->context.flush_mm = 1; if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); pgste_ipte_notify(mm, address, ptep, pgste); } pte = *ptep; - if (!mm_exclusive(mm)) - __ptep_ipte(address, ptep); + ptep_flush_lazy(mm, address, ptep); + pte_val(*ptep) |= _PAGE_INVALID; if (mm_has_pgste(mm)) { pgste = pgste_update_all(&pte, pgste); @@ -1117,7 +1159,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, pgste_t pgste; if (mm_has_pgste(mm)) { - pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); + pgste = pgste_get(ptep); pgste_set_key(ptep, pgste, pte); pgste_set_pte(ptep, pte); pgste_set_unlock(ptep, pgste); @@ -1139,7 +1181,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, pte = *ptep; __ptep_ipte(address, ptep); - pte_val(*ptep) = _PAGE_TYPE_EMPTY; + pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_update_all(&pte, pgste); @@ -1163,18 +1205,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, pgste_t pgste; pte_t pte; - if (mm_has_pgste(mm)) { + if (!full && mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - if (!full) - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, address, ptep, pgste); } pte = *ptep; if (!full) - __ptep_ipte(address, ptep); - pte_val(*ptep) = _PAGE_TYPE_EMPTY; + ptep_flush_lazy(mm, address, ptep); + pte_val(*ptep) = _PAGE_INVALID; - if (mm_has_pgste(mm)) { + if (!full && mm_has_pgste(mm)) { pgste = pgste_update_all(&pte, pgste); pgste_set_unlock(ptep, pgste); } @@ -1189,14 +1230,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, pte_t pte = *ptep; if (pte_write(pte)) { - mm->context.flush_mm = 1; if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); pgste = pgste_ipte_notify(mm, address, ptep, pgste); } - if (!mm_exclusive(mm)) - __ptep_ipte(address, ptep); + ptep_flush_lazy(mm, address, ptep); pte = pte_wrprotect(pte); if (mm_has_pgste(mm)) { @@ -1240,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); - return __pte; + return pte_mkyoung(__pte); } static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) @@ -1248,10 +1287,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) unsigned long physpage = page_to_phys(page); pte_t __pte = mk_pte_phys(physpage, pgprot); - if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { - pte_val(__pte) |= _PAGE_SWC; - pte_val(__pte) &= ~_PAGE_RO; - } + if (pte_write(__pte) && PageDirty(page)) + __pte = pte_mkdirty(__pte); return __pte; } @@ -1313,7 +1350,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) unsigned long sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); - if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) { + if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) { asm volatile( " .insn rrf,0xb98e0000,%2,%3,0,0" : "=m" (*pmdp) @@ -1324,24 +1361,68 
@@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) } } +static inline void __pmd_csp(pmd_t *pmdp) +{ + register unsigned long reg2 asm("2") = pmd_val(*pmdp); + register unsigned long reg3 asm("3") = pmd_val(*pmdp) | + _SEGMENT_ENTRY_INVALID; + register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; + + asm volatile( + " csp %1,%3" + : "=m" (*pmdp) + : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); +} + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) { /* - * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx) + * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx) * Convert to segment table entry format. */ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) return pgprot_val(SEGMENT_NONE); - if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) - return pgprot_val(SEGMENT_RO); - return pgprot_val(SEGMENT_RW); + if (pgprot_val(pgprot) == pgprot_val(PAGE_READ)) + return pgprot_val(SEGMENT_READ); + return pgprot_val(SEGMENT_WRITE); +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ +#ifdef CONFIG_64BIT + if (pmd_prot_none(pmd)) { + pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; + } else { + pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; + pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; + } +#endif + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ +#ifdef CONFIG_64BIT + if (pmd_prot_none(pmd)) { + pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; + } else { + pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; + pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; + } +#endif + return pmd; } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { + int young; + + young = pmd_young(pmd); pmd_val(pmd) &= _SEGMENT_CHG_MASK; pmd_val(pmd) |= massage_pgprot_pmd(newprot); + if (young) + pmd = pmd_mkyoung(pmd); return pmd; } @@ -1349,14 +1430,14 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { pmd_t __pmd; pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); - return __pmd; + return pmd_mkyoung(__pmd); } static inline pmd_t pmd_mkwrite(pmd_t pmd) { - /* Do not clobber _HPAGE_TYPE_NONE pages! */ - if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV)) - pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; + /* Do not clobber PROT_NONE segments! */ + if (!pmd_prot_none(pmd)) + pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; return pmd; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ @@ -1378,7 +1459,7 @@ static inline int pmd_trans_splitting(pmd_t pmd) static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t entry) { - if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1) pmd_val(entry) |= _SEGMENT_ENTRY_CO; *pmdp = entry; } @@ -1391,7 +1472,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) static inline pmd_t pmd_wrprotect(pmd_t pmd) { - pmd_val(pmd) |= _SEGMENT_ENTRY_RO; + /* Do not clobber PROT_NONE segments! */ + if (!pmd_prot_none(pmd)) + pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; return pmd; } @@ -1401,50 +1484,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) return pmd; } -static inline pmd_t pmd_mkold(pmd_t pmd) -{ - /* No referenced bit in the segment table entry. */ - return pmd; -} - -static inline pmd_t pmd_mkyoung(pmd_t pmd) -{ - /* No referenced bit in the segment table entry. 
*/ - return pmd; -} - #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK; - long tmp, rc; - int counter; + pmd_t pmd; - rc = 0; - if (MACHINE_HAS_RRBM) { - counter = PTRS_PER_PTE >> 6; - asm volatile( - "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */ - " ogr %1,%0\n" - " la %3,0(%4,%3)\n" - " brct %2,0b\n" - : "=&d" (tmp), "+&d" (rc), "+d" (counter), - "+a" (pmd_addr) - : "a" (64 * 4096UL) : "cc"); - rc = !!rc; - } else { - counter = PTRS_PER_PTE; - asm volatile( - "0: rrbe 0,%2\n" - " la %2,0(%3,%2)\n" - " brc 12,1f\n" - " lhi %0,1\n" - "1: brct %1,0b\n" - : "+d" (rc), "+d" (counter), "+a" (pmd_addr) - : "a" (4096UL) : "cc"); - } - return rc; + pmd = *pmdp; + __pmd_idte(address, pmdp); + *pmdp = pmd_mkold(pmd); + return pmd_young(pmd); } #define __HAVE_ARCH_PMDP_GET_AND_CLEAR @@ -1510,10 +1559,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd) * exception will occur instead of a page translation exception. The * specifiation exception has the bad habit not to store necessary * information in the lowcore. - * Bit 21 and bit 22 are the page invalid bit and the page protection - * bit. We set both to indicate a swapped page. - * Bit 30 and 31 are used to distinguish the different page types. For - * a swapped page these bits need to be zero. + * Bits 21, 22, 30 and 31 are used to indicate the page type. + * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 * This leaves the bits 1-19 and bits 24-29 to store type and offset. * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 * plus 24 for the offset. @@ -1527,10 +1574,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd) * exception will occur instead of a page translation exception. The * specifiation exception has the bad habit not to store necessary * information in the lowcore. - * Bit 53 and bit 54 are the page invalid bit and the page protection - * bit. We set both to indicate a swapped page. - * Bit 62 and 63 are used to distinguish the different page types. For - * a swapped page these bits need to be zero. + * Bits 53, 54, 62 and 63 are used to indicate the page type. + * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 * This leaves the bits 0-51 and bits 56-61 to store type and offset. * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 * plus 56 for the offset. @@ -1547,7 +1592,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { pte_t pte; offset &= __SWP_OFFSET_MASK; - pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | + pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); return pte; } @@ -1570,7 +1615,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define pgoff_to_pte(__off) \ ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ - | _PAGE_TYPE_FILE }) + | _PAGE_INVALID | _PAGE_PROTECT }) #endif /* !__ASSEMBLY__ */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 6b499870662f..b0e6435b2f02 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -91,7 +91,15 @@ struct thread_struct { #endif }; -#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */ +/* Flag to disable transactions. */ +#define PER_FLAG_NO_TE 1UL +/* Flag to enable random transaction aborts. 
*/ +#define PER_FLAG_TE_ABORT_RAND 2UL +/* Flag to specify random transaction abort mode: + * - abort each transaction at a random instruction before TEND if set. + * - abort random transactions at a random instruction if cleared. + */ +#define PER_FLAG_TE_ABORT_RAND_TEND 4UL typedef struct thread_struct thread_struct; diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h new file mode 100644 index 000000000000..5b3e48ef534b --- /dev/null +++ b/arch/s390/include/asm/serial.h @@ -0,0 +1,6 @@ +#ifndef _ASM_S390_SERIAL_H +#define _ASM_S390_SERIAL_H + +#define BASE_BAUD 0 + +#endif /* _ASM_S390_SERIAL_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index f3a9e0f92704..6dbd559763c9 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -8,9 +8,10 @@ #define __ASM_SWITCH_TO_H #include <linux/thread_info.h> +#include <asm/ptrace.h> extern struct task_struct *__switch_to(void *, void *); -extern void update_per_regs(struct task_struct *task); +extern void update_cr_regs(struct task_struct *task); static inline void save_fp_regs(s390_fp_regs *fpregs) { @@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs) static inline void save_access_regs(unsigned int *acrs) { - asm volatile("stam 0,15,%0" : "=Q" (*acrs)); + typedef struct { int _[NUM_ACRS]; } acrstype; + + asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs)); } static inline void restore_access_regs(unsigned int *acrs) { - asm volatile("lam 0,15,%0" : : "Q" (*acrs)); + typedef struct { int _[NUM_ACRS]; } acrstype; + + asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); } #define switch_to(prev,next,last) do { \ @@ -86,7 +91,7 @@ static inline void restore_access_regs(unsigned int *acrs) restore_fp_regs(&next->thread.fp_regs); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ - update_per_regs(next); \ + update_cr_regs(next); \ } \ prev = __switch_to(prev,next); \ } while (0) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..2cb846c4b37f 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -32,6 +32,7 @@ struct mmu_gather { struct mm_struct *mm; struct mmu_table_batch *batch; unsigned int fullmm; + unsigned long start, end; }; struct mmu_table_batch { @@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned int full_mm_flush) + unsigned long start, + unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); tlb->batch = NULL; if (tlb->fullmm) __tlb_flush_mm(mm); @@ -59,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, static inline void tlb_flush_mmu(struct mmu_gather *tlb) { + __tlb_flush_mm_lazy(tlb->mm); tlb_table_flush(tlb); } static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - tlb_table_flush(tlb); + tlb_flush_mmu(tlb); } /* diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 6b32af30878c..f9fef0425fee 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) __tlb_flush_full(mm); } -static inline void __tlb_flush_mm_cond(struct mm_struct * mm) +static inline void __tlb_flush_mm_lazy(struct 
mm_struct * mm) { if (mm->context.flush_mm) { __tlb_flush_mm(mm); @@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm) static inline void flush_tlb_mm(struct mm_struct *mm) { - __tlb_flush_mm_cond(mm); + __tlb_flush_mm_lazy(mm); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __tlb_flush_mm_cond(vma->vm_mm); + __tlb_flush_mm_lazy(vma->vm_mm); } static inline void flush_tlb_kernel_range(unsigned long start, |
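
Two bit-numbering conventions meet in the diff above: the adapter-interrupt vector is processed MSB-first (bit 0 is the leftmost bit of a big-endian word), while bit_spin_lock() and the generic bitops count from the LSB. airq_iv_lock() bridges the two with bit ^ (BITS_PER_LONG - 1), and the inline bitops locate the byte holding a bit with (nr ^ (BITS_PER_LONG - 8)) >> 3. A minimal, runnable check of both conversions (assuming BITS_PER_LONG == 64; user-space illustration, not kernel code):

	#include <assert.h>

	#define BITS_PER_LONG 64

	int main(void)
	{
		const unsigned long be_to_le = BITS_PER_LONG - 1;
		unsigned long nr;

		/* MSB-first bit 0 is LSB-first bit 63, and vice versa */
		assert((0UL ^ be_to_le) == 63);
		assert((63UL ^ be_to_le) == 0);
		/* the conversion is its own inverse */
		assert(((42UL ^ be_to_le) ^ be_to_le) == 42);

		/* byte of a big-endian 8-byte word that holds LSB-first bit nr:
		 * bits 0-7 live in the last byte, bits 56-63 in the first */
		nr = 0;
		assert(((nr ^ (BITS_PER_LONG - 8)) >> 3) == 7);
		nr = 63;
		assert(((nr ^ (BITS_PER_LONG - 8)) >> 3) == 0);
		return 0;
	}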
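For context, this is how a hypothetical driver might consume the new airq_iv API. Everything prefixed my_ is invented, error handling is trimmed, and the assumption that airq_iv_alloc_bit()/airq_iv_scan() return -1UL when no bit is available is not visible in this header; it is a guess about the separate implementation. A usage sketch, not a definitive driver:

	#include <linux/errno.h>
	#include <asm/airq.h>

	static struct airq_iv *my_iv;

	static void my_dispatch(unsigned int data)
	{
		/* hypothetical per-device work keyed by the stored 32-bit value */
	}

	static void my_thin_handler(struct airq_struct *airq)
	{
		unsigned long bit;

		/* walk the pending bits of the vector; assumes airq_iv_scan()
		 * clears each bit it reports and yields -1UL when done */
		for (bit = airq_iv_scan(my_iv, 0, airq_iv_end(my_iv));
		     bit != -1UL;
		     bit = airq_iv_scan(my_iv, bit + 1, airq_iv_end(my_iv))) {
			airq_iv_lock(my_iv, bit);
			my_dispatch(airq_iv_get_data(my_iv, bit));
			airq_iv_unlock(my_iv, bit);
		}
	}

	static int my_setup_vector(void)
	{
		unsigned long bit;

		my_iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_BITLOCK |
					    AIRQ_IV_DATA);
		if (!my_iv)
			return -ENOMEM;
		bit = airq_iv_alloc_bit(my_iv);
		if (bit == -1UL) {		/* assumed failure value */
			airq_iv_release(my_iv);
			return -ENOSPC;
		}
		airq_iv_set_data(my_iv, bit, 42);
		return 0;
	}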
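The reworked pte encoding packs the type tests into single mask-and-compare operations, exactly as the new comment table in pgtable.h states. A user-space sanity check of those patterns, building swap and file ptes the way mk_swap_pte() and pgoff_to_pte() do (constants copied from the patch; illustration, not kernel code):

	#include <assert.h>

	#define _PAGE_PRESENT 0x001UL
	#define _PAGE_TYPE    0x002UL
	#define _PAGE_PROTECT 0x200UL
	#define _PAGE_INVALID 0x400UL

	/* the documented bit patterns from the comment block */
	static int pte_present(unsigned long pte) { return (pte & 0x001) == 0x001; }
	static int pte_none(unsigned long pte)    { return (pte & 0x603) == 0x400; }
	static int pte_file(unsigned long pte)    { return (pte & 0x601) == 0x600; }
	static int pte_swap(unsigned long pte)    { return (pte & 0x603) == 0x402; }

	int main(void)
	{
		/* swap pte per mk_swap_pte(): invalid + type bit + type/offset */
		unsigned long swp = _PAGE_INVALID | _PAGE_TYPE |
				    (0x1fUL << 2) | (1UL << 7);
		/* file pte per pgoff_to_pte(): invalid + protect + offset bits */
		unsigned long file = _PAGE_INVALID | _PAGE_PROTECT | (0x7fUL << 1);
		/* prot-none, clean, old from the table: .11...000001 */
		unsigned long pn = _PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT;

		assert(pte_none(_PAGE_INVALID));		/* empty pte */
		assert(pte_swap(swp) && !pte_present(swp));
		assert(pte_file(file) && !pte_present(file));
		assert(pte_present(pn) && !pte_none(pn) && !pte_file(pn));
		return 0;
	}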
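The payoff of the new software bits is that pte_mkyoung()/pte_mkdirty() can maintain the hardware bits directly: a pte becomes valid to the hardware only once it is young and readable, and writable only once it is dirty and writable, which is what lets dirty/referenced tracking work without storage-key games. A small check that walking a read-write pte from "clean, old" to "dirty, young" reproduces the comment table's patterns (user-space illustration with the patch's constants):

	#include <assert.h>

	#define _PAGE_PRESENT 0x001UL
	#define _PAGE_YOUNG   0x004UL
	#define _PAGE_DIRTY   0x008UL
	#define _PAGE_READ    0x010UL
	#define _PAGE_WRITE   0x020UL
	#define _PAGE_PROTECT 0x200UL
	#define _PAGE_INVALID 0x400UL

	static unsigned long pte_mkyoung(unsigned long pte)
	{
		pte |= _PAGE_YOUNG;
		if (pte & _PAGE_READ)		/* young + readable => valid */
			pte &= ~_PAGE_INVALID;
		return pte;
	}

	static unsigned long pte_mkdirty(unsigned long pte)
	{
		pte |= _PAGE_DIRTY;
		if (pte & _PAGE_WRITE)		/* dirty + writable => unprotected */
			pte &= ~_PAGE_PROTECT;
		return pte;
	}

	int main(void)
	{
		/* read-write, clean, old: .11...110001 */
		unsigned long pte = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
				    _PAGE_INVALID | _PAGE_PROTECT;

		pte = pte_mkdirty(pte_mkyoung(pte));
		/* read-write, dirty, young: .00...111101 */
		assert(pte == (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			       _PAGE_DIRTY | _PAGE_YOUNG));
		return 0;
	}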
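With _SEGMENT_ENTRY_YOUNG the segment table gains software young tracking too, and the "..R...I...y." table is easy to misread because prot-none entries overload the protect bit as the young bit. The sketch below re-implements pmd_prot_none()/pmd_young()/pmd_write() in user space and checks every row of the table. Note that the read-write, old pattern equals _SEGMENT_ENTRY_EMPTY, which is why the patch's comment falls back to the table origin to tell the two apart:

	#include <assert.h>

	#define _SEGMENT_ENTRY_PROTECT 0x200UL
	#define _SEGMENT_ENTRY_INVALID 0x020UL
	#define _SEGMENT_ENTRY_YOUNG   0x002UL
	#define _SEGMENT_ENTRY_NONE    _SEGMENT_ENTRY_YOUNG

	static int pmd_prot_none(unsigned long pmd)
	{
		return (pmd & _SEGMENT_ENTRY_INVALID) &&
		       (pmd & _SEGMENT_ENTRY_NONE);
	}

	static int pmd_young(unsigned long pmd)
	{
		if (pmd_prot_none(pmd))	/* protect bit doubles as young bit */
			return (pmd & _SEGMENT_ENTRY_PROTECT) != 0;
		return (pmd & _SEGMENT_ENTRY_YOUNG) != 0;
	}

	static int pmd_write(unsigned long pmd)
	{
		if (pmd_prot_none(pmd))
			return 0;
		return (pmd & _SEGMENT_ENTRY_PROTECT) == 0;
	}

	int main(void)
	{
		assert(!pmd_young(0x022) && !pmd_write(0x022)); /* prot-none, old */
		assert( pmd_young(0x222) && !pmd_write(0x222)); /* prot-none, young */
		assert(!pmd_young(0x220) && !pmd_write(0x220)); /* read-only, old */
		assert( pmd_young(0x202) && !pmd_write(0x202)); /* read-only, young */
		assert(!pmd_young(0x020) &&  pmd_write(0x020)); /* read-write, old */
		assert( pmd_young(0x002) &&  pmd_write(0x002)); /* read-write, young */
		return 0;
	}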
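The mm_exclusive()/flush_mm dance is replaced by a uniform lazy scheme: ptep_flush_lazy() issues the IPTE only when another CPU might have the mm attached (attach_count greater than the local "active" contribution); otherwise it merely marks context.flush_mm, and __tlb_flush_mm_lazy() settles the debt at switch_mm() or mmu_gather time. A user-space model of that decision, with counters standing in for the real flush primitives (a simplified illustration, not the kernel's locking story):

	#include <assert.h>
	#include <stdbool.h>

	struct mm {
		int attach_count;	/* CPUs that have this mm attached */
		bool is_active_mm;	/* mm == current->active_mm here */
		bool flush_mm;		/* pending lazy flush */
		int flushes;		/* flush operations issued */
	};

	static void ptep_flush_lazy(struct mm *mm)
	{
		int active = mm->is_active_mm ? 1 : 0;

		if (mm->attach_count > active)
			mm->flushes++;		/* stands in for __ptep_ipte() */
		else
			mm->flush_mm = true;	/* defer the flush */
	}

	static void tlb_flush_mm_lazy(struct mm *mm)
	{
		if (mm->flush_mm) {
			mm->flushes++;		/* stands in for __tlb_flush_mm() */
			mm->flush_mm = false;
		}
	}

	int main(void)
	{
		struct mm solo   = { .attach_count = 1, .is_active_mm = true };
		struct mm shared = { .attach_count = 2, .is_active_mm = true };

		/* exclusive mm: three pte clears cost one deferred flush */
		ptep_flush_lazy(&solo); ptep_flush_lazy(&solo); ptep_flush_lazy(&solo);
		assert(solo.flushes == 0 && solo.flush_mm);
		tlb_flush_mm_lazy(&solo);
		assert(solo.flushes == 1 && !solo.flush_mm);

		/* shared mm: every clear must be flushed immediately */
		ptep_flush_lazy(&shared);
		assert(shared.flushes == 1 && !shared.flush_mm);
		return 0;
	}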
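Finally, tlb_gather_mmu() now derives the full-mm case from the range itself: !(start | (end + 1)) is true exactly when start == 0 and end == ~0UL, the sentinel range passed for a full address-space teardown. A one-line check (relying on well-defined unsigned wraparound):

	#include <assert.h>

	int main(void)
	{
		unsigned long start = 0, end = ~0UL;

		assert(!(start | (end + 1)));		/* full mm flush */
		start = 0x1000; end = 0x1fff;
		assert((start | (end + 1)) != 0);	/* bounded range is not */
		return 0;
	}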