author    Len Brown <len.brown@intel.com>  2009-04-05 10:14:15 +0400
committer Len Brown <len.brown@intel.com>  2009-04-05 10:14:15 +0400
commit    478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree      a7f7952099da60d33032aed6de9c0c56c9f8779e /arch/ia64/include
parent    8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent    6bb597507f9839b13498781e481f5458aea33620 (diff)
download  linux-478c6a43fcbc6c11609f8cee7c7b57223907754f.tar.xz
Merge branch 'linus' into release
Conflicts:
arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/ia64/include')
36 files changed, 1229 insertions, 342 deletions
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
-#define dma_mapping_error	platform_dma_mapping_error
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
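[Editor's illustration, not part of the commit: how a driver-side call flows through the dma-mapping.h hunk above after the conversion. The function and device pointer are placeholders; the calls and types are the ones declared in the hunk.]

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_map_buffer(struct device *dev, void *buf, size_t len)
	{
		/* dma_map_single() now expands to dma_map_single_attrs(..., NULL),
		 * which fetches the per-platform struct dma_map_ops and calls
		 * ops->map_page() on the page backing 'buf'. */
		dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))	/* also routed via ops */
			return -EIO;

		/* ... hand 'handle' to the device, run the transfer ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}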
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 3859558ff0a4..0c26157cffa5 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,8 +6,6 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#include <asm/types.h>
-
 /* floating point status register: */
 #define FPSR_TRAP_VD	(1 << 0)	/* invalid op trap disabled */
 #define FPSR_TRAP_DD	(1 << 1)	/* denormal trap disabled */
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
index 0f5b55921758..c2c5fd8fcac4 100644
--- a/arch/ia64/include/asm/gcc_intrin.h
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
  */
 
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 /* define this macro to get some asm stmts included in 'c' files */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index a3e44a5ed497..111ed5222892 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -10,6 +10,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
 /* include compiler specific intrinsics */
 #include <asm/ia64regs.h>
 #ifdef __INTEL_COMPILER
@@ -201,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
 
 #ifndef __ASSEMBLY__
 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
-#define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#ifdef ASM_SUPPORTED
+# define IA64_INTRINSIC_API(name)	paravirt_ ## name
+#else
+# define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#endif
 #define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
 #else
 #define IA64_INTRINSIC_API(name)	ia64_native_ ## name
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index bfa86b6af7cd..18a7e49abbc5 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -21,8 +21,7 @@
  *
  */
 
-#include <asm/types.h>
-
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Select x86 specific features in <linux/kvm.h> */
@@ -166,7 +165,40 @@ struct saved_vpd {
 	unsigned long vcpuid[5];
 	unsigned long vpsr;
 	unsigned long vpr;
-	unsigned long vcr[128];
+	union {
+		unsigned long vcr[128];
+		struct {
+			unsigned long dcr;
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;
+			unsigned long rsv2[7];
+			unsigned long ipsr;
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
 };
 
 struct kvm_regs {
@@ -214,4 +246,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+#define KVM_IA64_VCPU_STACK_SHIFT	16
+#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
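[Editor's illustration, not part of the commit: the union added to struct saved_vpd above gives two views of the same 128-word VPD block, so control registers can be addressed by name or by index. Assuming the anonymous struct lines up with the vcr[] indices (dcr overlays vcr[0], itm overlays vcr[1], and so on), both assignments below touch the same storage.]

	static void example_vpd_views(struct saved_vpd *vpd)
	{
		vpd->vcr[0] = 0;	/* indexed view of the VPD block */
		vpd->dcr = 0;		/* named view: aliases vcr[0] */
	}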
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 348663661659..4542651e6acb 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT	16
 #define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
 
-#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT		16
+#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
 
 #define KVM_VM_STRUCT_SHIFT	19
 #define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
 	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };
 
-#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE		KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
 				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
 
@@ -235,8 +239,6 @@ struct kvm_vm_data {
 struct kvm;
 struct kvm_vcpu;
 
-struct kvm_guest_debug{
-};
 
 struct kvm_mmio_req {
 	uint64_t addr;          /*  physical address		*/
@@ -462,6 +464,8 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
 
+	int		online_vcpus;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 #  define platform_dma_init		ia64_mv.dma_init
-#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error	ia64_mv.dma_mapping_error
-#  define platform_dma_supported	ia64_mv.dma_supported
 #  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+#  define platform_dma_get_ops		ia64_mv.dma_get_ops
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
 	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported		swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask	ia64_dma_get_required_mask
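[Editor's illustration, not part of the commit: the machvec.h hunk collapses a dozen per-operation hooks into one dma_get_ops hook. A minimal sketch of the generic fallback this header declares (the commit only declares dma_get_ops and the global dma_ops here; the actual definition lives outside this header tree) could look like:]

	struct dma_map_ops *dma_get_ops(struct device *dev)
	{
		/* one global ops table, installed by platform_dma_init() */
		return dma_ops;
	}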
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent	vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported		iommu_dma_supported;
-extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
 extern ia64_mv_dma_init			pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init			pci_iommu_alloc;
 #define platform_name				"dig_vtd"
 #define platform_setup				dig_setup
 #define platform_dma_init			pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+extern ia64_mv_dma_init			sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
  */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		sba_alloc_coherent
-#define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single_attrs		sba_map_single_attrs
-#define platform_dma_unmap_single_attrs		sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs		sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			sba_dma_supported
-#define platform_dma_mapping_error		sba_dma_mapping_error
+#define platform_dma_init			sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent	hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs	hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported		hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error	hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops		hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device	hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name				"hpzx1_swiotlb"
-
 #define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		hwsw_alloc_coherent
-#define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single_attrs		hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs		hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
-#define platform_dma_supported			hwsw_dma_supported
-#define platform_dma_mapping_error		hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device		hwsw_sync_sg_for_device
+#define platform_dma_get_ops			hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa5..f061a30aac42 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
-extern ia64_mv_dma_supported		sn_dma_supported;
 extern ia64_mv_dma_get_required_mask	sn_dma_get_required_mask;
+extern ia64_mv_dma_init			sn_dma_init;
 extern ia64_mv_migrate_t		sn_migrate;
 extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t		sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
 #define platform_pci_legacy_read	sn_pci_legacy_read
 #define platform_pci_legacy_write	sn_pci_legacy_write
-#define platform_dma_init		machvec_noop
-#define platform_dma_alloc_coherent	sn_dma_alloc_coherent
-#define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error	sn_dma_mapping_error
-#define platform_dma_supported		sn_dma_supported
 #define platform_dma_get_required_mask	sn_dma_get_required_mask
+#define platform_dma_init		sn_dma_init
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event	sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
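[Editor's illustration, not part of the commit: the hpzx1_swiotlb machvec above keeps exactly one DMA hook, hwsw_dma_get_ops, because with the ops-table design a hybrid platform can pick an ops table per device instead of overriding twelve macros. A sketch of the shape of such a hook; use_swiotlb() and the two ops-table names are assumptions for illustration only:]

	struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
	{
		if (use_swiotlb(dev))		/* device can't reach all memory */
			return &swiotlb_dma_ops;
		return &sba_dma_ops;		/* hardware IOMMU ops */
	}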
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db930..7f2a456603cb 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 	/* re-check, now that we've got the lock: */
 	context = mm->context;
 	if (context == 0) {
-		cpus_clear(mm->cpu_vm_mask);
+		cpumask_clear(mm_cpumask(mm));
 		if (ia64_ctx.next >= ia64_ctx.limit) {
 			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 					ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
 
 	do {
 		context = get_mmu_context(mm);
-		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		reload_context(context);
 		/*
 		 * in the unlikely event of a TLB-flush by another thread,
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index d2da61e4c49b..908eaef42a08 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -16,6 +16,12 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
+#ifdef CONFIG_PARAVIRT
+	struct elf64_shdr *paravirt_bundles;
+					/* paravirt_alt_bundle_patch table */
+	struct elf64_shdr *paravirt_insts;
+					/* paravirt_alt_inst_patch table */
+#endif
 	unsigned long gp;		/* global-pointer for module */
 
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 000000000000..592c1047a0c5
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,42 @@
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define	MSI_DATA_VECTOR_SHIFT		0
+#define	MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define	MSI_DATA_VECTOR_MASK		0xffffff00
+
+#define	MSI_DATA_DELIVERY_MODE_SHIFT	8
+#define	MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define	MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define	MSI_DATA_LEVEL_SHIFT		14
+#define	MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
+#define	MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
+
+#define	MSI_DATA_TRIGGER_SHIFT		15
+#define	MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
+#define	MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define	MSI_ADDR_DEST_ID_SHIFT		4
+#define	MSI_ADDR_HEADER			0xfee00000
+
+#define	MSI_ADDR_DEST_ID_MASK		0xfff0000f
+#define	MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define	MSI_ADDR_DEST_MODE_SHIFT	2
+#define	MSI_ADDR_DEST_MODE_PHYS		(0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define	MSI_ADDR_DEST_MODE_LOGIC	(1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define	MSI_ADDR_REDIRECTION_SHIFT	3
+#define	MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define	MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif/* _IA64_MSI_DEF_H */
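[Editor's illustration, not part of the commit: composing an MSI address/data pair from the macros in the new msidef.h, in the style of the arch MSI setup code. 'dest_phys_id' and 'vector' are placeholders.]

	#include <linux/msi.h>

	static void example_compose_msi(struct msi_msg *msg,
					u32 dest_phys_id, u8 vector)
	{
		msg->address_hi = 0;
		msg->address_lo = MSI_ADDR_HEADER |
				  MSI_ADDR_DEST_MODE_PHYS |
				  MSI_ADDR_REDIRECTION_CPU |
				  MSI_ADDR_DEST_ID_CPU(dest_phys_id);
		msg->data = MSI_DATA_TRIGGER_EDGE |
			    MSI_DATA_LEVEL_ASSERT |
			    MSI_DATA_DELIVERY_FIXED |
			    MSI_DATA_VECTOR(vector);
	}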
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index 0a1026cca4fa..d2d46efb3e6e 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -30,6 +30,9 @@
 #define __paravirt_work_processed_syscall_target \
 						ia64_work_processed_syscall
 
+#define paravirt_fsyscall_table			ia64_native_fsyscall_table
+#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON	0xdeadbeefbaadf00d
 # define CLOBBER(clob)				\
@@ -74,6 +77,11 @@
 (pred)	mov reg = psr			\
 	CLOBBER(clob)
 
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+(pred)	mov reg = ar.itc				\
+	CLOBBER(clob)					\
+	CLOBBER_PRED(pred_clob)
+
 #define MOV_TO_IFA(reg, clob)	\
 	mov cr.ifa = reg	\
 	CLOBBER(clob)
@@ -158,6 +166,11 @@
 #define RSM_PSR_DT		\
 	rsm psr.dt
 
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	rsm psr.be | psr.i		\
+	CLOBBER(clob0)			\
+	CLOBBER(clob1)
+
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	ssm psr.dt		\
 	;;			\
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 000000000000..be16ca9311bf
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist		\
+	__ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist			\
+	__ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist			\
+	__ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist			\
+	__ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist		\
+	__ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist		\
+	__ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
index b8e6eb1090d7..8d72962ec838 100644
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -180,6 +180,11 @@
 	IS_PRED_IN(pred)				\
 	IS_RREG_OUT(reg)				\
 	IS_RREG_CLOB(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+	IS_PRED_IN(pred)				\
+	IS_PRED_CLOB(pred_clob)				\
+	IS_RREG_OUT(reg)				\
+	IS_RREG_CLOB(clob)
 #define MOV_TO_IFA(reg, clob)				\
 	IS_RREG_IN(reg)					\
 	IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
 	IS_RREG_CLOB(clob2)
 #define RSM_PSR_DT				\
 	nop 0
+#define RSM_PSR_BE_I(clob0, clob1)		\
+	IS_RREG_CLOB(clob0)			\
+	IS_RREG_CLOB(clob1)
 #define SSM_PSR_DT_AND_SRLZ_I			\
 	nop 0
 #define BSW_0(clob0, clob1, clob2)		\
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2bf3636473fe..2eb0a981a09a 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -22,6 +22,56 @@
 #ifndef __ASM_PARAVIRT_H
 #define __ASM_PARAVIRT_H
 
+#ifndef __ASSEMBLY__
+/******************************************************************************
+ * fsys related addresses
+ */
+struct pv_fsys_data {
+	unsigned long *fsyscall_table;
+	void *fsys_bubble_down;
+};
+
+extern struct pv_fsys_data pv_fsys_data;
+
+unsigned long *paravirt_get_fsyscall_table(void);
+char *paravirt_get_fsys_bubble_down(void);
+
+/******************************************************************************
+ * patchlist addresses for gate page
+ */
+enum pv_gate_patchlist {
+	PV_GATE_START_FSYSCALL,
+	PV_GATE_END_FSYSCALL,
+
+	PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
+	PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
+
+	PV_GATE_START_VTOP,
+	PV_GATE_END_VTOP,
+
+	PV_GATE_START_MCKINLEY_E9,
+	PV_GATE_END_MCKINLEY_E9,
+};
+
+struct pv_patchdata {
+	unsigned long start_fsyscall_patchlist;
+	unsigned long end_fsyscall_patchlist;
+	unsigned long start_brl_fsys_bubble_down_patchlist;
+	unsigned long end_brl_fsys_bubble_down_patchlist;
+	unsigned long start_vtop_patchlist;
+	unsigned long end_vtop_patchlist;
+	unsigned long start_mckinley_e9_patchlist;
+	unsigned long end_mckinley_e9_patchlist;
+
+	void *gate_section;
+};
+
+extern struct pv_patchdata pv_patchdata;
+
+unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
+void *paravirt_get_gate_section(void);
+#endif
+
 #ifdef CONFIG_PARAVIRT_GUEST
 
 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0
@@ -68,6 +118,14 @@ struct pv_init_ops {
 	int (*arch_setup_nomca)(void);
 
 	void (*post_smp_prepare_boot_cpu)(void);
+
+#ifdef ASM_SUPPORTED
+	unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
+				      unsigned long type);
+	unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
+				    unsigned long type);
+#endif
+	void (*patch_branch)(unsigned long tag, unsigned long type);
 };
 
 extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
 	int (*do_steal_accounting)(unsigned long *new_itm);
 
 	void (*clocksource_resume)(void);
+
+	unsigned long long (*sched_clock)(void);
};
 
 extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
 	return pv_time_ops.do_steal_accounting(new_itm);
 }
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return pv_time_ops.sched_clock();
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #else
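[Editor's illustration, not part of the commit: the sched_clock hook added to pv_time_ops above lets a hypervisor supply its own notion of scheduler time. A sketch of how the arch sched_clock() could defer to it, assuming the native implementation is the ia64_native_sched_clock() declared in the timex.h hunk further down:]

	unsigned long long sched_clock(void)
	{
		/* dispatches to pv_time_ops.sched_clock(); a native boot
		 * would point this at ia64_native_sched_clock(). */
		return paravirt_sched_clock();
	}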
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
new file mode 100644
index 000000000000..128ff5db6e67
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_patch.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __ASM_PARAVIRT_PATCH_H
+#define __ASM_PARAVIRT_PATCH_H
+
+#ifdef __ASSEMBLY__
+
+	.section .paravirt_branches, "a"
+	.previous
+#define PARAVIRT_PATCH_SITE_BR(type)		\
+	{					\
+	[1:] ;					\
+	br.cond.sptk.many 2f ;			\
+	nop.b 0 ;				\
+	nop.b 0;; ;				\
+	} ;					\
+	2:					\
+	.xdata8 ".paravirt_branches", 1b, type
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/intrinsics.h>
+
+/* for binary patch */
+struct paravirt_patch_site_bundle {
+	void		*sbundle;
+	void		*ebundle;
+	unsigned long	type;
+};
+
+/* label means the beginning of new bundle */
+#define paravirt_alt_bundle(instr, privop)			\
+	"\t998:\n"						\
+	"\t" instr "\n"						\
+	"\t999:\n"						\
+	"\t.pushsection .paravirt_bundles, \"a\"\n"		\
+	"\t.popsection\n"					\
+	"\t.xdata8 \".paravirt_bundles\", 998b, 999b, "		\
+	__stringify(privop) "\n"
+
+
+struct paravirt_patch_bundle_elem {
+	const void	*sbundle;
+	const void	*ebundle;
+	unsigned long	type;
+};
+
+
+struct paravirt_patch_site_inst {
+	unsigned long stag;
+	unsigned long etag;
+	unsigned long type;
+};
+
+#define paravirt_alt_inst(instr, privop)			\
+	"\t[998:]\n"						\
+	"\t" instr "\n"						\
+	"\t[999:]\n"						\
+	"\t.pushsection .paravirt_insts, \"a\"\n"		\
+	"\t.popsection\n"					\
+	"\t.xdata8 \".paravirt_insts\", 998b, 999b, "		\
+	__stringify(privop) "\n"
+
+struct paravirt_patch_site_branch {
+	unsigned long tag;
+	unsigned long type;
+};
+
+struct paravirt_patch_branch_target {
+	const void	*entry;
+	unsigned long	type;
+};
+
+void
+__paravirt_patch_apply_branch(
+	unsigned long tag, unsigned long type,
+	const struct paravirt_patch_branch_target *entries,
+	unsigned int nr_entries);
+
+void
+paravirt_patch_reloc_br(unsigned long tag, const void *target);
+
+void
+paravirt_patch_reloc_brl(unsigned long tag, const void *target);
+
+
+#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
+unsigned long
+ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+
+unsigned long
+__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
+			      const struct paravirt_patch_bundle_elem *elems,
+			      unsigned long nelems,
+			      const struct paravirt_patch_bundle_elem **found);
+
+void
+paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
+			    const struct paravirt_patch_site_bundle *end);
+
+void
+paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
+			  const struct paravirt_patch_site_inst *end);
+
+void paravirt_patch_apply(void);
+#else
+#define paravirt_patch_apply_bundle(start, end)	do { } while (0)
+#define paravirt_patch_apply_inst(start, end)	do { } while (0)
+#define paravirt_patch_apply()			do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLEY__ */
+
+#endif /* __ASM_PARAVIRT_PATCH_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 33c8e55f5775..3d2951130b5f 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -33,7 +33,7 @@
  */
 
 struct pv_cpu_ops {
-	void (*fc)(unsigned long addr);
+	void (*fc)(void *addr);
 	unsigned long (*thash)(unsigned long addr);
 	unsigned long (*get_cpuid)(int index);
 	unsigned long (*get_pmd)(int index);
@@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 /* Instructions paravirtualized for performance */
 /************************************************/
 
+#ifndef ASM_SUPPORTED
+#define paravirt_ssm_i()	pv_cpu_ops.ssm_i()
+#define paravirt_rsm_i()	pv_cpu_ops.rsm_i()
+#define __paravirt_getreg()	pv_cpu_ops.getreg()
+#endif
+
 /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing).
  * static inline function doesn't satisfy it. */
 #define paravirt_ssm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
-			pv_cpu_ops.ssm_i();	\
+			paravirt_ssm_i();	\
		else				\
			ia64_native_ssm(mask);	\
	} while (0)
@@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 #define paravirt_rsm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
-			pv_cpu_ops.rsm_i();	\
+			paravirt_rsm_i();	\
		else				\
			ia64_native_rsm(mask);	\
	} while (0)
@@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
		if ((reg) == _IA64_REG_IP)	\
			res = ia64_native_getreg(_IA64_REG_IP); \
		else				\
-			res = pv_cpu_ops.getreg(reg);	\
+			res = __paravirt_getreg(reg);	\
		res;				\
	})
@@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
 
 #endif /* CONFIG_PARAVIRT */
 
+#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
+#define paravirt_dv_serialize_data()	ia64_dv_serialize_data()
+#else
+#define paravirt_dv_serialize_data()	/* nothing */
+#endif
+
 /* these routines utilize privilege-sensitive or performance-sensitive
  * privileged instructions so the code must be replaced with
  * paravirtualized versions */
@@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
 	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
 #define ia64_leave_kernel	IA64_PARAVIRT_ASM_FUNC(leave_kernel)
 
+
+#if defined(CONFIG_PARAVIRT)
+/******************************************************************************
+ * binary patching infrastructure
+ */
+#define PARAVIRT_PATCH_TYPE_FC				1
+#define PARAVIRT_PATCH_TYPE_THASH			2
+#define PARAVIRT_PATCH_TYPE_GET_CPUID			3
+#define PARAVIRT_PATCH_TYPE_GET_PMD			4
+#define PARAVIRT_PATCH_TYPE_PTCGA			5
+#define PARAVIRT_PATCH_TYPE_GET_RR			6
+#define PARAVIRT_PATCH_TYPE_SET_RR			7
+#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4		8
+#define PARAVIRT_PATCH_TYPE_SSM_I			9
+#define PARAVIRT_PATCH_TYPE_RSM_I			10
+#define PARAVIRT_PATCH_TYPE_GET_PSR_I			11
+#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE	12
+
+/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */
+#define PARAVIRT_PATCH_TYPE_GETREG		0x10000000
+#define PARAVIRT_PATCH_TYPE_SETREG		0x20000000
+
+/*
+ * struct task_struct* (*ia64_switch_to)(void* next_task);
+ * void *ia64_leave_syscall;
+ * void *ia64_work_processed_syscall
+ * void *ia64_leave_kernel;
+ */
+
+#define PARAVIRT_PATCH_TYPE_BR_START			0x30000000
+#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO		\
+	(PARAVIRT_PATCH_TYPE_BR_START + 0)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL		\
+	(PARAVIRT_PATCH_TYPE_BR_START + 1)
+#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL	\
+	(PARAVIRT_PATCH_TYPE_BR_START + 2)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL		\
+	(PARAVIRT_PATCH_TYPE_BR_START + 3)
+
+#ifdef ASM_SUPPORTED
+#include <asm/paravirt_patch.h>
+
+/*
+ * pv_cpu_ops calling stub.
+ * normal function call convension can't be written by gcc
+ * inline assembly.
+ *
+ * from the caller's point of view,
+ * the following registers will be clobbered.
+ * r2, r3
+ * r8-r15
+ * r16, r17
+ * b6, b7
+ * p6-p15
+ * ar.ccv
+ *
+ * from the callee's point of view ,
+ * the following registers can be used.
+ * r2, r3: scratch
+ * r8: scratch, input argument0 and return value
+ * r0-r15: scratch, input argument1-5
+ * b6: return pointer
+ * b7: scratch
+ * p6-p15: scratch
+ * ar.ccv: scratch
+ *
+ * other registers must not be changed. especially
+ * b0: rp: preserved. gcc ignores b0 in clobbered register.
+ * r16: saved gp
+ */
+/* 5 bundles */
+#define __PARAVIRT_BR							\
+	";;\n"								\
+	"{ .mlx\n"							\
+	"nop 0\n"							\
+	"movl r2 = %[op_addr]\n"/* get function pointer address */	\
+	";;\n"								\
+	"}\n"								\
+	"1:\n"								\
+	"{ .mii\n"							\
+	"ld8 r2 = [r2]\n"	/* load function descriptor address */	\
+	"mov r17 = ip\n"	/* get ip to calc return address */	\
+	"mov r16 = gp\n"	/* save gp */				\
+	";;\n"								\
+	"}\n"								\
+	"{ .mii\n"							\
+	"ld8 r3 = [r2], 8\n"	/* load entry address */		\
+	"adds r17 =  1f - 1b, r17\n"	/* calculate return address */	\
+	";;\n"								\
+	"mov b7 = r3\n"		/* set entry address */			\
+	"}\n"								\
+	"{ .mib\n"							\
+	"ld8 gp = [r2]\n"	/* load gp value */			\
+	"mov b6 = r17\n"	/* set return address */		\
+	"br.cond.sptk.few b7\n"	/* intrinsics are very short isns */	\
+	"}\n"								\
+	"1:\n"								\
+	"{ .mii\n"							\
+	"mov gp = r16\n"	/* restore gp value */			\
+	"nop 0\n"							\
+	"nop 0\n"							\
+	";;\n"								\
+	"}\n"
+
+#define PARAVIRT_OP(op)				\
+	[op_addr] "i"(&pv_cpu_ops.op)
+
+#define PARAVIRT_TYPE(type)			\
+	PARAVIRT_PATCH_TYPE_ ## type
+
+#define PARAVIRT_REG_CLOBBERS0					\
+	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
+		"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS1					\
+	"r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14",		\
+		"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS2					\
+	"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",	\
+		"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS5					\
+	"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/	\
+		"r15", "r16", "r17"
+
+#define PARAVIRT_BR_CLOBBERS			\
+	"b6", "b7"
+
+#define PARAVIRT_PR_CLOBBERS						\
+	"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
+
+#define PARAVIRT_AR_CLOBBERS			\
+	"ar.ccv"
+
+#define PARAVIRT_CLOBBERS0			\
+		PARAVIRT_REG_CLOBBERS0,		\
+		PARAVIRT_BR_CLOBBERS,		\
+		PARAVIRT_PR_CLOBBERS,		\
+		PARAVIRT_AR_CLOBBERS,		\
+		"memory"
+
+#define PARAVIRT_CLOBBERS1			\
+		PARAVIRT_REG_CLOBBERS1,		\
+		PARAVIRT_BR_CLOBBERS,		\
+		PARAVIRT_PR_CLOBBERS,		\
+		PARAVIRT_AR_CLOBBERS,		\
+		"memory"
+
+#define PARAVIRT_CLOBBERS2			\
+		PARAVIRT_REG_CLOBBERS2,		\
+		PARAVIRT_BR_CLOBBERS,		\
+		PARAVIRT_PR_CLOBBERS,		\
+		PARAVIRT_AR_CLOBBERS,		\
+		"memory"
+
+#define PARAVIRT_CLOBBERS5			\
+		PARAVIRT_REG_CLOBBERS5,		\
+		PARAVIRT_BR_CLOBBERS,		\
+		PARAVIRT_PR_CLOBBERS,		\
+		PARAVIRT_AR_CLOBBERS,		\
+		"memory"
+
+#define PARAVIRT_BR0(op, type)					\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      :	"=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op)				\
+		      : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR0_RET(op, type)				\
+	register unsigned long ia64_intri_res asm ("r8");	\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_intri_res)			\
+		      : PARAVIRT_OP(op)				\
+		      : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR1(op, type, arg1)				\
+	register unsigned long __##arg1 asm ("r8") = arg1;	\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      :	"=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_RET(op, type, arg1)			\
+	register unsigned long ia64_intri_res asm ("r8");	\
+	register unsigned long __##arg1 asm ("r8") = arg1;	\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_intri_res)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_VOID(op, type, arg1)			\
+	register void *__##arg1 asm ("r8") = arg1;		\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      :	"=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR2(op, type, arg1, arg2)				\
+	register unsigned long __##arg1 asm ("r8") = arg1;		\
+	register unsigned long __##arg2 asm ("r9") = arg2;		\
+	register unsigned long ia64_clobber1 asm ("r8");		\
+	register unsigned long ia64_clobber2 asm ("r9");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
+					  PARAVIRT_TYPE(type))		\
+		      : "=r"(ia64_clobber1), "=r"(ia64_clobber2)	\
+		      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2)	\
+		      : PARAVIRT_CLOBBERS2)
+
+
+#define PARAVIRT_DEFINE_CPU_OP0(op, type)		\
+	static inline void				\
+	paravirt_ ## op (void)				\
+	{						\
+		PARAVIRT_BR0(op, type);			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)		\
+	static inline unsigned long			\
+	paravirt_ ## op (void)				\
+	{						\
+		PARAVIRT_BR0_RET(op, type);		\
+		return ia64_intri_res;			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type)		\
+	static inline void				\
+	paravirt_ ## op (void *arg1)			\
+	{						\
+		PARAVIRT_BR1_VOID(op, type, arg1);	\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1(op, type)		\
+	static inline void				\
+	paravirt_ ## op (unsigned long arg1)		\
+	{						\
+		PARAVIRT_BR1(op, type, arg1);		\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)		\
+	static inline unsigned long			\
+	paravirt_ ## op (unsigned long arg1)		\
+	{						\
+		PARAVIRT_BR1_RET(op, type, arg1);	\
+		return ia64_intri_res;			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP2(op, type)		\
+	static inline void				\
+	paravirt_ ## op (unsigned long arg1,		\
+			 unsigned long arg2)		\
+	{						\
+		PARAVIRT_BR2(op, type, arg1, arg2);	\
+	}
+
+
+PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
+PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
+PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
+PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
+PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
+PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
+PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
+PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
+
+static inline void
+paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+			unsigned long val2, unsigned long val3,
+			unsigned long val4)
+{
+	register unsigned long __val0 asm ("r8") = val0;
+	register unsigned long __val1 asm ("r9") = val1;
+	register unsigned long __val2 asm ("r10") = val2;
+	register unsigned long __val3 asm ("r11") = val3;
+	register unsigned long __val4 asm ("r14") = val4;
+
+	register unsigned long ia64_clobber0 asm ("r8");
+	register unsigned long ia64_clobber1 asm ("r9");
+	register unsigned long ia64_clobber2 asm ("r10");
+	register unsigned long ia64_clobber3 asm ("r11");
+	register unsigned long ia64_clobber4 asm ("r14");
+
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
+					  PARAVIRT_TYPE(SET_RR0_TO_RR4))
+		      : "=r"(ia64_clobber0),
+			"=r"(ia64_clobber1),
+			"=r"(ia64_clobber2),
+			"=r"(ia64_clobber3),
+			"=r"(ia64_clobber4)
+		      : PARAVIRT_OP(set_rr0_to_rr4),
+			"0"(__val0), "1"(__val1), "2"(__val2),
+			"3"(__val3), "4"(__val4)
+		      : PARAVIRT_CLOBBERS5);
+}
+
+/* unsigned long paravirt_getreg(int reg) */
+#define __paravirt_getreg(reg)						\
+	({								\
+		register unsigned long ia64_intri_res asm ("r8");	\
+		register unsigned long __reg asm ("r8") = (reg);	\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
+		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+						  PARAVIRT_TYPE(GETREG) \
+						  + (reg))		\
+			      : "=r"(ia64_intri_res)			\
+			      : PARAVIRT_OP(getreg), "0"(__reg)		\
+			      : PARAVIRT_CLOBBERS1);			\
+									\
+		ia64_intri_res;						\
+	})
+
+/* void paravirt_setreg(int reg, unsigned long val) */
+#define paravirt_setreg(reg, val)					\
+	do {								\
+		register unsigned long __val asm ("r8") = val;		\
+		register unsigned long __reg asm ("r9") = reg;		\
+		register unsigned long ia64_clobber1 asm ("r8");	\
+		register unsigned long ia64_clobber2 asm ("r9");	\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
+		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+						  PARAVIRT_TYPE(SETREG) \
+						  + (reg))		\
+			      : "=r"(ia64_clobber1),			\
+				"=r"(ia64_clobber2)			\
+			      : PARAVIRT_OP(setreg),			\
+				"1"(__reg), "0"(__val)			\
+			      : PARAVIRT_CLOBBERS2);			\
+	} while (0)
+
+#endif /* ASM_SUPPORTED */
+#endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */
+
 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
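[Editor's illustration, not part of the commit: what one of the generator macros above produces when written out by hand. PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) expands to the following inline; the asm body is the 5-bundle __PARAVIRT_BR calling stub, and paravirt_alt_bundle() records its start/end and patch type in .paravirt_bundles so the stub can later be patched in place.]

	static inline unsigned long paravirt_thash(unsigned long arg1)
	{
		register unsigned long ia64_intri_res asm ("r8");
		register unsigned long __arg1 asm ("r8") = arg1;

		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
						  PARAVIRT_TYPE(THASH))
			      : "=r"(ia64_intri_res)
			      : PARAVIRT_OP(thash), "0"(__arg1)
			      : PARAVIRT_CLOBBERS1);
		return ia64_intri_res;	/* r8 carries the return value */
	}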
__raw_read_lock_flags(lock, 0) + +#else /* !ASM_SUPPORTED */ + +#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) + #define __raw_read_lock(rw) \ do { \ raw_rwlock_t *__read_lock_ptr = (rw); \ @@ -131,6 +163,8 @@ do { \ } \ } while (0) +#endif /* !ASM_SUPPORTED */ + #define __raw_read_unlock(rw) \ do { \ raw_rwlock_t *__read_lock_ptr = (rw); \ @@ -138,20 +172,33 @@ do { \ } while (0) #ifdef ASM_SUPPORTED -#define __raw_write_lock(rw) \ -do { \ - __asm__ __volatile__ ( \ - "mov ar.ccv = r0\n" \ - "dep r29 = -1, r0, 31, 1;;\n" \ - "1:\n" \ - "ld4 r2 = [%0];;\n" \ - "cmp4.eq p0,p7 = r0,r2\n" \ - "(p7) br.cond.spnt.few 1b \n" \ - "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \ - "cmp4.eq p0,p7 = r0, r2\n" \ - "(p7) br.cond.spnt.few 1b;;\n" \ - :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ -} while(0) + +static __always_inline void +__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +{ + __asm__ __volatile__ ( + "tbit.nz p6, p0 = %1, %2\n" + "mov ar.ccv = r0\n" + "dep r29 = -1, r0, 31, 1\n" + "br.few 3f;;\n" + "1:\n" + "(p6) ssm psr.i\n" + "2:\n" + "hint @pause\n" + "ld4 r2 = [%0];;\n" + "cmp4.eq p0,p7 = r0, r2\n" + "(p7) br.cond.spnt.few 2b\n" + "(p6) rsm psr.i\n" + ";;\n" + "3:\n" + "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" + "cmp4.eq p0,p7 = r0, r2\n" + "(p7) br.cond.spnt.few 1b;;\n" + : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT) + : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); +} + +#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) #define __raw_write_trylock(rw) \ ({ \ @@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #else /* !ASM_SUPPORTED */ +#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) + #define __raw_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h index 6aa58b699eea..c89a8cb5d8a5 100644 --- a/arch/ia64/include/asm/swab.h +++ b/arch/ia64/include/asm/swab.h @@ -6,7 +6,7 @@ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. */ -#include <asm/types.h> +#include <linux/types.h> #include <asm/intrinsics.h> #include <linux/compiler.h> diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h index 4e03cfe74a0c..86c7db861180 100644 --- a/arch/ia64/include/asm/timex.h +++ b/arch/ia64/include/asm/timex.h @@ -40,5 +40,6 @@ get_cycles (void) } extern void ia64_cpu_local_tick (void); +extern unsigned long long ia64_native_sched_clock (void); #endif /* _ASM_IA64_TIMEX_H */ diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 32f3af1641c5..7b4c8c70b2d1 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -44,11 +44,6 @@ #define parent_node(nid) (nid) /* - * Returns the number of the first CPU on Node 'node'. - */ -#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) - -/* * Determines the node for a given pci bus */ #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node @@ -84,7 +79,7 @@ void build_cpu_to_node_map(void); .child = NULL, \ .groups = NULL, \ .min_interval = 8, \ - .max_interval = 8*(min(num_online_cpus(), 32)), \ + .max_interval = 8*(min(num_online_cpus(), 32U)), \ .busy_factor = 64, \ .imbalance_pct = 125, \ .cache_nice_tries = 2, \ @@ -117,11 +112,6 @@ void build_cpu_to_node_map(void); extern void arch_fix_phys_package_id(int num, u32 slot); -#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? 
\ - CPU_MASK_ALL : \ - node_to_cpumask(pcibus_to_node(bus)) \ - ) - #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ cpu_all_mask : \ cpumask_of_node(pcibus_to_node(bus))) diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h new file mode 100644 index 000000000000..61b5bdfd980e --- /dev/null +++ b/arch/ia64/include/asm/uv/uv.h @@ -0,0 +1,13 @@ +#ifndef _ASM_IA64_UV_UV_H +#define _ASM_IA64_UV_UV_H + +#include <asm/system.h> +#include <asm/sn/simulator.h> + +static inline int is_uv_system(void) +{ + /* temporary support for running on hardware simulator */ + return IS_MEDUSA() || ia64_platform_is("uv"); +} + +#endif /* _ASM_IA64_UV_UV_H */ diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h index f607018af4a1..53e9dfacd073 100644 --- a/arch/ia64/include/asm/uv/uv_hub.h +++ b/arch/ia64/include/asm/uv/uv_hub.h @@ -305,5 +305,11 @@ static inline int uv_num_possible_blades(void) return 1; } +static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) +{ + /* not currently needed on ia64 */ +} + + #endif /* __ASM_IA64_UV_HUB__ */ diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h index c149ef085437..fe0b8f05e1a8 100644 --- a/arch/ia64/include/asm/uv/uv_mmrs.h +++ b/arch/ia64/include/asm/uv/uv_mmrs.h @@ -8,8 +8,8 @@ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. */ -#ifndef __ASM_IA64_UV_MMRS__ -#define __ASM_IA64_UV_MMRS__ +#ifndef _ASM_IA64_UV_UV_MMRS_H +#define _ASM_IA64_UV_UV_MMRS_H #define UV_MMR_ENABLE (1UL << 63) @@ -243,6 +243,158 @@ union uvh_event_occurred0_u { #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 /* ========================================================================= */ +/* UVH_GR0_TLB_INT0_CONFIG */ +/* ========================================================================= */ +#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL + +#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 +#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 +#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11 +#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12 +#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13 +#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL +#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15 +#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL +#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16 +#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL +#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32 +#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + +union uvh_gr0_tlb_int0_config_u { + unsigned long v; + struct uvh_gr0_tlb_int0_config_s { + unsigned long vector_ : 8; /* RW */ + unsigned long dm : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long status : 1; /* RO */ + unsigned long p : 1; /* RO */ + unsigned long rsvd_14 : 1; /* */ + unsigned long t : 1; /* RO */ + unsigned long m : 1; /* RW */ + unsigned long rsvd_17_31: 15; /* */ + unsigned long apic_id : 32; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_GR0_TLB_INT1_CONFIG */ +/* ========================================================================= */ +#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL + +#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 +#define 
UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 +#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11 +#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12 +#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13 +#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL +#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15 +#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL +#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16 +#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL +#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32 +#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + +union uvh_gr0_tlb_int1_config_u { + unsigned long v; + struct uvh_gr0_tlb_int1_config_s { + unsigned long vector_ : 8; /* RW */ + unsigned long dm : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long status : 1; /* RO */ + unsigned long p : 1; /* RO */ + unsigned long rsvd_14 : 1; /* */ + unsigned long t : 1; /* RO */ + unsigned long m : 1; /* RW */ + unsigned long rsvd_17_31: 15; /* */ + unsigned long apic_id : 32; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_GR1_TLB_INT0_CONFIG */ +/* ========================================================================= */ +#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL + +#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 +#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 +#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11 +#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12 +#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13 +#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL +#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15 +#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL +#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16 +#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL +#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32 +#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + +union uvh_gr1_tlb_int0_config_u { + unsigned long v; + struct uvh_gr1_tlb_int0_config_s { + unsigned long vector_ : 8; /* RW */ + unsigned long dm : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long status : 1; /* RO */ + unsigned long p : 1; /* RO */ + unsigned long rsvd_14 : 1; /* */ + unsigned long t : 1; /* RO */ + unsigned long m : 1; /* RW */ + unsigned long rsvd_17_31: 15; /* */ + unsigned long apic_id : 32; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_GR1_TLB_INT1_CONFIG */ +/* ========================================================================= */ +#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL + +#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 +#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 +#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 +#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 +#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL 
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int1_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
 /* UVH_INT_CMPB */
 /* ========================================================================= */
 #define UVH_INT_CMPB 0x22080UL
@@ -670,4 +822,4 @@ union uvh_si_alias2_overlay_config_u {
 };
-#endif /* __ASM_IA64_UV_MMRS__ */
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 7a804e80fc67..e425227a418e 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,9 +33,6 @@
 #ifndef _ASM_IA64_XEN_HYPERVISOR_H
 #define _ASM_IA64_XEN_HYPERVISOR_H
-#ifdef CONFIG_XEN
-
-#include <linux/init.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h> /* to compile feature.c */
 #include <xen/features.h> /* to compile xen-netfront.c */
@@ -43,22 +40,32 @@
 /* xen_domain_type is set before executing any C code by early_xen_setup */
 enum xen_domain_type {
- XEN_NATIVE,
- XEN_PV_DOMAIN,
- XEN_HVM_DOMAIN,
+ XEN_NATIVE, /* running on bare hardware */
+ XEN_PV_DOMAIN, /* running in a PV domain */
+ XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
 };
+#ifdef CONFIG_XEN
 extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type XEN_NATIVE
+#endif
 #define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
-#define xen_initial_domain() (xen_pv_domain() && \
+#define xen_pv_domain() (xen_domain() && \
+ xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain() (xen_domain() && \
+ xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#define xen_initial_domain() (xen_pv_domain() && \
 (xen_start_info->flags & SIF_INITDOMAIN))
-#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
+#else
+#define xen_initial_domain() (0)
+#endif
-/* deprecated. remove this */
-#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
+#ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
@@ -74,16 +81,6 @@ void force_evtchn_callback(void);
 /* For setup_arch() in arch/ia64/kernel/setup.c */
 void xen_ia64_enable_opt_feature(void);
-
-#else /* CONFIG_XEN */
-
-#define xen_domain() (0)
-#define xen_pv_domain() (0)
-#define xen_initial_domain() (0)
-#define xen_hvm_domain() (0)
-#define is_running_on_xen() (0) /* deprecated. remove this */
 #endif
-#define is_initial_xendomain() (0) /* deprecated. remove this */
-
 #endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
index 19c2ae1d878a..c53a47611208 100644
--- a/arch/ia64/include/asm/xen/inst.h
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -33,6 +33,9 @@
 #define __paravirt_work_processed_syscall_target \
 xen_work_processed_syscall
+#define paravirt_fsyscall_table xen_fsyscall_table
+#define paravirt_fsys_bubble_down xen_fsys_bubble_down
+
 #define MOV_FROM_IFA(reg) \
 movl reg = XSI_IFA; \
 ;; \
@@ -110,6 +113,27 @@
 .endm
 #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
+/* assuming ar.itc is read with interrupts disabled. */
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+(pred) movl clob = XSI_ITC_OFFSET; \
+ ;; \
+(pred) ld8 clob = [clob]; \
+(pred) mov reg = ar.itc; \
+ ;; \
+(pred) add reg = reg, clob; \
+ ;; \
+(pred) movl clob = XSI_ITC_LAST; \
+ ;; \
+(pred) ld8 clob = [clob]; \
+ ;; \
+(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
+ ;; \
+(pred_clob) add reg = 1, clob; \
+ ;; \
+(pred) movl clob = XSI_ITC_LAST; \
+ ;; \
+(pred) st8 [clob] = reg
+
 #define MOV_TO_IFA(reg, clob) \
 movl clob = XSI_IFA; \
@@ -362,6 +386,10 @@
 #define RSM_PSR_DT \
 XEN_HYPER_RSM_PSR_DT
+#define RSM_PSR_BE_I(clob0, clob1) \
+ RSM_PSR_I(p0, clob0, clob1); \
+ rum psr.be
+
 #define SSM_PSR_DT_AND_SRLZ_I \
 XEN_HYPER_SSM_PSR_DT
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index f00fab40854d..e951e740bdf2 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -209,6 +209,15 @@ struct mapped_regs {
 unsigned long krs[8]; /* kernel registers */
 unsigned long tmp[16]; /* temp registers (e.g. for hyperprivops) */
+
+ /* itc paravirtualization
+ * vAR.ITC = mAR.ITC + itc_offset
+ * itc_last is the value most recently passed to
+ * the guest OS, in order to prevent it from
+ * going backwards.
+ */
+ unsigned long itc_offset;
+ unsigned long itc_last;
 };
 };
 };
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index 4d92d9bbda7b..c57fa910f2c9 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,3 +1,12 @@
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define XEN_ACCOUNT_GET_STAMP \
+ MOV_FROM_ITC(pUStk, p6, r20, r2);
+#else
+#define XEN_ACCOUNT_GET_STAMP
+#endif
+
 /*
 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
@@ -123,7 +132,7 @@
 ;; \
 .mem.offset 0,0; st8.spill [r16]=r2,16; \
 .mem.offset 8,0; st8.spill [r17]=r3,16; \
- ACCOUNT_GET_STAMP \
+ XEN_ACCOUNT_GET_STAMP \
 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
 ;; \
 EXTRA; \
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
new file mode 100644
index 000000000000..eae944e88846
--- /dev/null
+++ b/arch/ia64/include/asm/xen/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist \
+ __xen_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist \
+ __xen_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
+ __xen_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
+ __xen_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist \
+ __xen_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist \
+ __xen_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist \
+ __xen_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist \
+ __xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
index 71ec7546e100..fb4ec5e0b066 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -55,6 +55,8 @@
 #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
 #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
 #define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
+#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
+#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
 #endif
 #ifndef __ASSEMBLY__
@@ -67,7 +69,7 @@
 * may have different semantics depending on whether they are executed
 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
 * be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(unsigned long addr);
+extern void xen_fc(void *addr);
 extern unsigned long xen_thash(unsigned long addr);
 /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
+#endif
 /************************************************/
 /* Instructions paravirtualized for performance */
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
 #define xen_get_virtual_pend() \
 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
 * be properly handled by Xen, some are frequent enough that we use
 * hyperprivops for performance. */
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 #endif /* !__ASSEMBLY__ */
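
For reference, the PARAVIRT_DEFINE_CPU_OP* generators in the paravirt_privop.h hunk are easier to follow once expanded by hand. Below is a sketch of what PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) plausibly expands to, assuming PARAVIRT_BR0_RET mirrors the PARAVIRT_BR1_RET pattern visible in the hunk (result returned in r8; the exact clobber-list macro is an assumption). This is an illustration, not the preprocessor's actual output:

/* sketch only: each paravirt_<op>() emits one patchable bundle that
 * branches to the hypervisor-specific implementation; the
 * PARAVIRT_TYPE(GET_PSR_I) tag lets the binary-patching machinery
 * rewrite the bundle in place at boot. */
static inline unsigned long
paravirt_get_psr_i(void)
{
	register unsigned long ia64_intri_res asm ("r8");
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
					  PARAVIRT_TYPE(GET_PSR_I))
		      : "=r"(ia64_intri_res)
		      : PARAVIRT_OP(get_psr_i)
		      : PARAVIRT_CLOBBERS1);
	return ia64_intri_res;
}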
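
The __raw_read_lock_flags assembly added to spinlock.h is dense; the point of the _flags variant is that a reader spinning behind a writer briefly re-enables interrupts when the caller's saved psr.i bit says they were on. A plain-C rendering of that control flow, for illustration only (fetchadd4_acq, fetchadd4_rel, read_counter_snapshot, and the irq helpers are hypothetical stand-ins for the ia64 instructions and lock layout, not kernel APIs):

static inline void
raw_read_lock_flags_sketch(raw_rwlock_t *lock, unsigned long flags)
{
	/* "tbit.nz p6, p0 = flags, IA64_PSR_I_BIT" */
	int irqs_were_enabled = (flags >> IA64_PSR_I_BIT) & 1;

	/* "fetchadd4.acq": optimistically take a reader reference;
	 * a negative count means a writer holds the lock */
	while (fetchadd4_acq(lock, 1) < 0) {
		/* back the reference out ("fetchadd4.rel ... -1") ... */
		fetchadd4_rel(lock, -1);
		/* ... and spin with interrupts re-enabled if the caller
		 * had them on ("(p6) ssm psr.i" / "(p6) rsm psr.i") */
		if (irqs_were_enabled)
			local_irq_enable();
		while (read_counter_snapshot(lock) < 0)
			cpu_relax();	/* "hint @pause" */
		if (irqs_were_enabled)
			local_irq_disable();
	}
}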
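
Every UVH_GR*_TLB_INT*_CONFIG block added to uv_mmrs.h follows one pattern: a raw 64-bit MMR value overlaid with a bitfield view, plus _SHFT/_MASK macros for callers that cannot use bitfields. A minimal standalone sketch of composing such a register value both ways (the field layout is copied from the hunk; the surrounding program is illustrative and relies on GCC's little-endian bitfield layout, as the kernel header itself does):

#include <stdio.h>

/* mirrors union uvh_gr0_tlb_int0_config_u from the hunk above */
union tlb_int_config_u {
	unsigned long v;
	struct {
		unsigned long vector_	: 8;	/* RW */
		unsigned long dm	: 3;	/* RW */
		unsigned long destmode	: 1;	/* RW */
		unsigned long status	: 1;	/* RO */
		unsigned long p		: 1;	/* RO */
		unsigned long rsvd_14	: 1;
		unsigned long t		: 1;	/* RO */
		unsigned long m		: 1;	/* RW */
		unsigned long rsvd_17_31: 15;
		unsigned long apic_id	: 32;	/* RW */
	} s;
};

int main(void)
{
	union tlb_int_config_u cfg = { .v = 0 };

	cfg.s.vector_ = 0xd0;	/* interrupt vector */
	cfg.s.m = 1;		/* mask the interrupt */
	cfg.s.apic_id = 0x42;	/* destination APIC id */

	/* the same value built in the _SHFT/_MASK style
	 * (VECTOR_SHFT = 0, M_SHFT = 16, APIC_ID_SHFT = 32) */
	unsigned long v = (0xd0UL << 0) | (1UL << 16) | (0x42UL << 32);

	printf("bitfield view: %#lx\n", cfg.v);	/* 0x4200010000d0 */
	printf("shift/mask:    %#lx\n", v);	/* 0x4200010000d0 */
	return 0;
}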