Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r-- | arch/s390/include/asm/barrier.h     |   9
-rw-r--r-- | arch/s390/include/asm/clp.h         |   2
-rw-r--r-- | arch/s390/include/asm/cpu_mf.h      |   4
-rw-r--r-- | arch/s390/include/asm/dma-mapping.h |   8
-rw-r--r-- | arch/s390/include/asm/irq.h         |   1
-rw-r--r-- | arch/s390/include/asm/kvm_host.h    |  15
-rw-r--r-- | arch/s390/include/asm/mman.h        |   4
-rw-r--r-- | arch/s390/include/asm/page.h        |  22
-rw-r--r-- | arch/s390/include/asm/pci.h         |  11
-rw-r--r-- | arch/s390/include/asm/pgtable.h     | 132
-rw-r--r-- | arch/s390/include/asm/sclp.h        |   1
-rw-r--r-- | arch/s390/include/asm/setup.h       |  22
-rw-r--r-- | arch/s390/include/asm/signal.h      |  19
-rw-r--r-- | arch/s390/include/asm/timex.h       |  18
-rw-r--r-- | arch/s390/include/asm/unistd.h      |   3
15 files changed, 146 insertions, 125 deletions
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 10a508802940..16760eeb79b0 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -13,15 +13,12 @@
  * to devices.
  */

-static inline void mb(void)
-{
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	/* Fast-BCR without checkpoint synchronization */
-	asm volatile("bcr 14,0" : : : "memory");
+/* Fast-BCR without checkpoint synchronization */
+#define mb() do {  asm volatile("bcr 14,0" : : : "memory"); } while (0)
 #else
-	asm volatile("bcr 15,0" : : : "memory");
+#define mb() do {  asm volatile("bcr 15,0" : : : "memory"); } while (0)
 #endif
-}

 #define rmb()				mb()
 #define wmb()				mb()
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
index 6c3aecc245ff..a0e71a501f7c 100644
--- a/arch/s390/include/asm/clp.h
+++ b/arch/s390/include/asm/clp.h
@@ -2,7 +2,7 @@
 #define _ASM_S390_CLP_H

 /* CLP common request & response block size */
-#define CLP_BLK_SIZE	(PAGE_SIZE * 2)
+#define CLP_BLK_SIZE	PAGE_SIZE

 struct clp_req_hdr {
 	u16 len;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 35f0020b7ba7..f1eddd150dd7 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -34,12 +34,12 @@

 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
 {
-	return MACHINE_HAS_SPP && test_facility(67);
+	return MACHINE_HAS_LPP && test_facility(67);
 }

 static inline int cpum_sf_avail(void)
 {
-	return MACHINE_HAS_SPP && test_facility(68);
+	return MACHINE_HAS_LPP && test_facility(68);
 }

diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8a32f7dfd3af..9411db653bac 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -19,9 +19,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 }

 extern int dma_set_mask(struct device *dev, u64 mask);
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+				  enum dma_data_direction direction)
+{
+}

 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7def77302d63..87c17bfb2968 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -41,6 +41,7 @@ enum interruption_class {
 	IRQIO_CSC,
 	IRQIO_PCI,
 	IRQIO_MSI,
+	IRQIO_VIR,
 	NMI_NMI,
 	CPU_RST,
 	NR_ARCH_IRQS
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b7841546991f..16bd5d169cdb 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -20,9 +20,7 @@
 #include <asm/cpu.h>

 #define KVM_MAX_VCPUS 64
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 32

 struct sca_entry {
 	atomic_t scn;
@@ -76,8 +74,11 @@ struct kvm_s390_sie_block {
 	__u64	epoch;			/* 0x0038 */
 	__u8	reserved40[4];		/* 0x0040 */
 #define LCTL_CR0	0x8000
+#define LCTL_CR6	0x0200
+#define LCTL_CR14	0x0002
 	__u16	lctl;			/* 0x0044 */
 	__s16	icpua;			/* 0x0046 */
+#define ICTL_LPSW 0x00400000
 	__u32	ictl;			/* 0x0048 */
 	__u32	eca;			/* 0x004c */
 	__u8	icptcode;		/* 0x0050 */
@@ -127,6 +128,7 @@ struct kvm_vcpu_stat {
 	u32 deliver_prefix_signal;
 	u32 deliver_restart_signal;
 	u32 deliver_program_int;
+	u32 deliver_io_int;
 	u32 exit_wait_state;
 	u32 instruction_stidp;
 	u32 instruction_spx;
@@ -187,6 +189,11 @@ struct kvm_s390_emerg_info {
 	__u16 code;
 };

+struct kvm_s390_mchk_info {
+	__u64 cr14;
+	__u64 mcic;
+};
+
 struct kvm_s390_interrupt_info {
 	struct list_head list;
 	u64 type;
@@ -197,6 +204,7 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_mchk_info mchk;
 	};
 };

@@ -254,6 +262,7 @@ struct kvm_arch{
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct gmap *gmap;
+	int css_support;
 };

 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 0e47a576d666..9977e08df5bd 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,7 +9,7 @@
 #include <uapi/asm/mman.h>

 #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
-int s390_mmap_check(unsigned long addr, unsigned long len);
-#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
+#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
 #endif
 #endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a86ad4084073..75ce9b065f9f 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -155,28 +155,6 @@ static inline int page_reset_referenced(unsigned long addr)
 #define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/

 /*
- * Test and clear dirty bit in storage key.
- * We can't clear the changed bit atomically. This is a potential
- * race against modification of the referenced bit. This function
- * should therefore only be called if it is not mapped in any
- * address space.
- *
- * Note that the bit gets set whenever page content is changed. That means
- * also when the page is modified by DMA or from inside the kernel.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
-{
-	unsigned char skey;
-
-	skey = page_get_storage_key(pfn << PAGE_SHIFT);
-	if (!(skey & _PAGE_CHANGED))
-		return 0;
-	page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
-	return 1;
-}
-
-/*
  * Test and clear referenced bit in storage key.
  */
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b1fa93c606ad..05333b7f0469 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -160,9 +160,14 @@ void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
 int zpci_msihash_init(void);
 void zpci_msihash_exit(void);

+#ifdef CONFIG_PCI
 /* Error handling and recovery */
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
+#else /* CONFIG_PCI */
+static inline void zpci_event_error(void *e) {}
+static inline void zpci_event_availability(void *e) {}
+#endif /* CONFIG_PCI */

 /* Helpers */
 struct zpci_dev *get_zdev(struct pci_dev *);
@@ -180,8 +185,10 @@ void zpci_dma_exit(void);
 /* Hotplug */
 extern struct mutex zpci_list_lock;
 extern struct list_head zpci_list;
-extern struct pci_hp_callback_ops hotplug_ops;
-extern unsigned int pci_probe;
+extern unsigned int s390_pci_probe;
+
+void zpci_register_hp_ops(struct pci_hp_callback_ops *);
+void zpci_deregister_hp_ops(void);

 /* FMB */
 int zpci_fmb_enable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 098adbb62660..97de1200c849 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -29,6 +29,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/page-flags.h>
 #include <asm/bug.h>
 #include <asm/page.h>

@@ -221,13 +222,15 @@ extern unsigned long MODULES_END;
 /* Software bits in the page table entry */
 #define _PAGE_SWT	0x001		/* SW pte type bit t */
 #define _PAGE_SWX	0x002		/* SW pte type bit x */
-#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
-#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
-#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
+#define _PAGE_SWC	0x004		/* SW pte changed bit */
+#define _PAGE_SWR	0x008		/* SW pte referenced bit */
+#define _PAGE_SWW	0x010		/* SW pte write bit */
+#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL

 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
+#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
+				 _PAGE_SWC | _PAGE_SWR)

 /* Six different types of pages. */
 #define _PAGE_TYPE_EMPTY	0x400
@@ -321,6 +324,7 @@ extern unsigned long MODULES_END;

 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
+#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
 #define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
@@ -382,9 +386,11 @@
  */
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
+#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
+#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

-#define PAGE_KERNEL	PAGE_RW
+#define PAGE_KERNEL	PAGE_RWC
+#define PAGE_SHARED	PAGE_KERNEL
 #define PAGE_COPY	PAGE_RO

 /*
@@ -631,23 +637,23 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 	/* Clear page changed & referenced bit in the storage key */
 	if (bits & _PAGE_CHANGED)
-		page_set_storage_key(address, skey ^ bits, 1);
+		page_set_storage_key(address, skey ^ bits, 0);
 	else if (bits)
 		page_reset_referenced(address);
 	/* Transfer page changed & referenced bit to guest bits in pgste */
 	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
 	/* Get host changed & referenced bits from pgste */
 	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
-	/* Clear host bits in pgste. */
+	/* Transfer page changed & referenced bit to kvm user bits */
+	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
+	/* Clear relevant host bits in pgste. */
 	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
 	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
 	/* Copy page access key and fetch protection bit to pgste */
 	pgste_val(pgste) |=
 		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-	/* Transfer changed and referenced to kvm user bits */
-	pgste_val(pgste) |= bits << 45;	/* KVM_UR_BIT & KVM_UC_BIT */
-	/* Transfer changed & referenced to pte sofware bits */
-	pte_val(*ptep) |= bits << 1;	/* _PAGE_SWR & _PAGE_SWC */
+	/* Transfer referenced bit to pte */
+	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
 #endif
 	return pgste;

@@ -660,20 +666,25 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)

 	if (!pte_present(*ptep))
 		return pgste;
+	/* Get referenced bit from storage key */
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
-	/* Transfer page referenced bit to pte software bit (host view) */
-	if (young || (pgste_val(pgste) & RCP_HR_BIT))
+	if (young)
+		pgste_val(pgste) |= RCP_GR_BIT;
+	/* Get host referenced bit from pgste */
+	if (pgste_val(pgste) & RCP_HR_BIT) {
+		pgste_val(pgste) &= ~RCP_HR_BIT;
+		young = 1;
+	}
+	/* Transfer referenced bit to kvm user bits and pte */
+	if (young) {
+		pgste_val(pgste) |= KVM_UR_BIT;
 		pte_val(*ptep) |= _PAGE_SWR;
-	/* Clear host referenced bit in pgste. */
-	pgste_val(pgste) &= ~RCP_HR_BIT;
-	/* Transfer page referenced bit to guest bit in pgste */
-	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
+	}
 #endif
 	return pgste;
-
 }

-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
@@ -687,10 +698,23 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 	/* Set page access key and fetch protection bit from pgste */
 	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
 	if (okey != nkey)
-		page_set_storage_key(address, nkey, 1);
+		page_set_storage_key(address, nkey, 0);
 #endif
 }

+static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
+{
+	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+		/*
+		 * Without enhanced suppression-on-protection force
+		 * the dirty bit on for all writable ptes.
+		 */
+		pte_val(entry) |= _PAGE_SWC;
+		pte_val(entry) &= ~_PAGE_RO;
+	}
+	*ptep = entry;
+}
+
 /**
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
@@ -749,11 +773,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,

 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_set_pte(ptep, pgste, entry);
-		*ptep = entry;
+		pgste_set_key(ptep, pgste, entry);
+		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
-	} else
+	} else {
+		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
+			pte_val(entry) |= _PAGE_CO;
 		*ptep = entry;
+	}
 }

 /*
@@ -762,16 +789,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  */
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_RO) == 0;
+	return (pte_val(pte) & _PAGE_SWW) != 0;
 }

 static inline int pte_dirty(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
-	if (pte_val(pte) & _PAGE_SWC)
-		return 1;
-#endif
-	return 0;
+	return (pte_val(pte) & _PAGE_SWC) != 0;
 }

 static inline int pte_young(pte_t pte)
@@ -821,11 +844,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
+	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }

 static inline pte_t pte_wrprotect(pte_t pte)
 {
+	pte_val(pte) &= ~_PAGE_SWW;
 	/* Do not clobber _PAGE_TYPE_NONE pages! */
 	if (!(pte_val(pte) & _PAGE_INVALID))
 		pte_val(pte) |= _PAGE_RO;
@@ -834,20 +860,26 @@ static inline pte_t pte_wrprotect(pte_t pte)

 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_SWW;
+	if (pte_val(pte) & _PAGE_SWC)
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }

 static inline pte_t pte_mkclean(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
 	pte_val(pte) &= ~_PAGE_SWC;
-#endif
+	/* Do not clobber _PAGE_TYPE_NONE pages! */
+	if (!(pte_val(pte) & _PAGE_INVALID))
+		pte_val(pte) |= _PAGE_RO;
 	return pte;
 }

 static inline pte_t pte_mkdirty(pte_t pte)
 {
+	pte_val(pte) |= _PAGE_SWC;
+	if (pte_val(pte) & _PAGE_SWW)
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }

@@ -885,10 +917,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
 		pte_val(pte) |= _SEGMENT_ENTRY_INV;
 	}
 	/*
-	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
-	 * table entry.
+	 * Clear SW pte bits, there are no SW bits in a segment table entry.
 	 */
-	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
+			  _PAGE_SWR | _PAGE_SWW);
 	/*
 	 * Also set the change-override bit because we don't need dirty bit
 	 * tracking for hugetlbfs pages.
@@ -1040,9 +1072,11 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 					   unsigned long address,
 					   pte_t *ptep, pte_t pte)
 {
-	*ptep = pte;
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm)) {
+		pgste_set_pte(ptep, pte);
 		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+	} else
+		*ptep = pte;
 }

 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
@@ -1110,10 +1144,13 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,

 		if (!mm_exclusive(mm))
 			__ptep_ipte(address, ptep);
-		*ptep = pte_wrprotect(pte);
+		pte = pte_wrprotect(pte);

-		if (mm_has_pgste(mm))
+		if (mm_has_pgste(mm)) {
+			pgste_set_pte(ptep, pte);
 			pgste_set_unlock(ptep, pgste);
+		} else
+			*ptep = pte;
 	}
 	return pte;
 }
@@ -1131,10 +1168,12 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 		pgste = pgste_get_lock(ptep);

 	__ptep_ipte(address, ptep);
-	*ptep = entry;

-	if (mm_has_pgste(vma->vm_mm))
+	if (mm_has_pgste(vma->vm_mm)) {
+		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
+	} else
+		*ptep = entry;
 	return 1;
 }

@@ -1152,8 +1191,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 {
 	unsigned long physpage = page_to_phys(page);
+	pte_t __pte = mk_pte_phys(physpage, pgprot);

-	return mk_pte_phys(physpage, pgprot);
+	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
+		pte_val(__pte) |= _PAGE_SWC;
+		pte_val(__pte) &= ~_PAGE_RO;
+	}
+	return __pte;
 }

 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -1245,6 +1289,8 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }

diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 833788693f09..06a136136047 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,7 +46,6 @@ int sclp_cpu_deconfigure(u8 cpu);
 void sclp_facilities_detect(void);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
-u8 sclp_get_fac85(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f69f76b3447a..ff67d730c00c 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -64,17 +64,18 @@ extern unsigned int s390_user_mode;

 #define MACHINE_FLAG_VM		(1UL << 0)
 #define MACHINE_FLAG_IEEE	(1UL << 1)
-#define MACHINE_FLAG_CSP	(1UL << 3)
-#define MACHINE_FLAG_MVPG	(1UL << 4)
-#define MACHINE_FLAG_DIAG44	(1UL << 5)
-#define MACHINE_FLAG_IDTE	(1UL << 6)
-#define MACHINE_FLAG_DIAG9C	(1UL << 7)
-#define MACHINE_FLAG_MVCOS	(1UL << 8)
-#define MACHINE_FLAG_KVM	(1UL << 9)
+#define MACHINE_FLAG_CSP	(1UL << 2)
+#define MACHINE_FLAG_MVPG	(1UL << 3)
+#define MACHINE_FLAG_DIAG44	(1UL << 4)
+#define MACHINE_FLAG_IDTE	(1UL << 5)
+#define MACHINE_FLAG_DIAG9C	(1UL << 6)
+#define MACHINE_FLAG_MVCOS	(1UL << 7)
+#define MACHINE_FLAG_KVM	(1UL << 8)
+#define MACHINE_FLAG_ESOP	(1UL << 9)
 #define MACHINE_FLAG_EDAT1	(1UL << 10)
 #define MACHINE_FLAG_EDAT2	(1UL << 11)
 #define MACHINE_FLAG_LPAR	(1UL << 12)
-#define MACHINE_FLAG_SPP	(1UL << 13)
+#define MACHINE_FLAG_LPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
 #define MACHINE_FLAG_RRBM	(1UL << 16)
@@ -84,6 +85,7 @@ extern unsigned int s390_user_mode;
 #define MACHINE_IS_LPAR		(S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)

 #define MACHINE_HAS_DIAG9C	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP	(S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
 #define MACHINE_HAS_PFMF	MACHINE_HAS_EDAT1
 #define MACHINE_HAS_HPAGE	MACHINE_HAS_EDAT1

@@ -96,7 +98,7 @@ extern unsigned int s390_user_mode;
 #define MACHINE_HAS_MVCOS	(0)
 #define MACHINE_HAS_EDAT1	(0)
 #define MACHINE_HAS_EDAT2	(0)
-#define MACHINE_HAS_SPP		(0)
+#define MACHINE_HAS_LPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
 #define MACHINE_HAS_TE		(0)
 #define MACHINE_HAS_RRBM	(0)
@@ -109,7 +111,7 @@ extern unsigned int s390_user_mode;
 #define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
+#define MACHINE_HAS_LPP		(S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
index db7ddfaf5b79..abf9e5735943 100644
--- a/arch/s390/include/asm/signal.h
+++ b/arch/s390/include/asm/signal.h
@@ -21,22 +21,5 @@ typedef struct {
 	unsigned long sig[_NSIG_WORDS];
 } sigset_t;

-struct old_sigaction {
-	__sighandler_t sa_handler;
-	old_sigset_t sa_mask;
-	unsigned long sa_flags;
-	void (*sa_restorer)(void);
-};
-
-struct sigaction {
-	__sighandler_t sa_handler;
-	unsigned long sa_flags;
-	void (*sa_restorer)(void);
-	sigset_t sa_mask;		/* mask last for extensibility */
-};
-
-struct k_sigaction {
-	struct sigaction sa;
-};
-
+#define __ARCH_HAS_SA_RESTORER
 #endif
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb5b8ea..8ad8af915032 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
 #define TOD_UNIX_EPOCH		0x7d91048bca000000ULL

 /* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
 {
 	int cc;

@@ -27,7 +27,7 @@ static inline int set_clock(__u64 time)
 	return cc;
 }

-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
 {
 	int cc;

@@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp)

 typedef unsigned long long cycles_t;

-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned long long clk;

@@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void)
 	return clk;
 }

-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }

-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
 {
 	unsigned char clk[16];
-	get_clock_ext(clk);
+	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }

 static inline cycles_t get_cycles(void)
 {
-	return (cycles_t) get_clock() >> 2;
+	return (cycles_t) get_tod_clock() >> 2;
 }

 int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc;
 * function, otherwise the returned value is not guaranteed to
 * be monotonic.
 */
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock_xt() - sched_clock_base_cc;
 }

 /**
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 636530872516..a6667a952969 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -43,15 +43,12 @@
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 # ifndef CONFIG_64BIT
 #   define __ARCH_WANT_STAT64
 #   define __ARCH_WANT_SYS_TIME
 # endif
 # ifdef CONFIG_COMPAT
 #   define __ARCH_WANT_COMPAT_SYS_TIME
-#   define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 # endif
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK