Diffstat (limited to 'arch/s390/pci/pci_insn.c')
-rw-r--r-- | arch/s390/pci/pci_insn.c | 169
1 file changed, 161 insertions, 8 deletions
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index f069929e8211..02f9505c99a8 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -8,9 +8,11 @@
 #include <linux/export.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/jump_label.h>
 #include <asm/facility.h>
 #include <asm/pci_insn.h>
 #include <asm/pci_debug.h>
+#include <asm/pci_io.h>
 #include <asm/processor.h>
 
 #define ZPCI_INSN_BUSY_DELAY    1       /* 1 microsecond */
@@ -96,13 +98,15 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
 }
 
 /* Set Interruption Controls */
-int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
 {
         if (!test_facility(72))
                 return -EIO;
-        asm volatile (
-                "       .insn   rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
-                : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+
+        asm volatile(
+                ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
+                : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
+
         return 0;
 }
 
@@ -140,7 +144,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
         return cc;
 }
 
-int zpci_load(u64 *data, u64 req, u64 offset)
+int __zpci_load(u64 *data, u64 req, u64 offset)
 {
         u8 status;
         int cc;
@@ -156,6 +160,52 @@ int zpci_load(u64 *data, u64 req, u64 offset)
 
         return (cc > 0) ? -EIO : cc;
 }
+EXPORT_SYMBOL_GPL(__zpci_load);
+
+static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
+                               unsigned long len)
+{
+        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+        return __zpci_load(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+        register u64 addr asm("2") = ioaddr;
+        register u64 r3 asm("3") = len;
+        int cc = -ENXIO;
+        u64 __data;
+
+        asm volatile (
+                "       .insn   rre,0xb9d60000,%[data],%[ioaddr]\n"
+                "0:     ipm     %[cc]\n"
+                "       srl     %[cc],28\n"
+                "1:\n"
+                EX_TABLE(0b, 1b)
+                : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
+                : [ioaddr] "d" (addr)
+                : "cc");
+        *status = r3 >> 24 & 0xff;
+        *data = __data;
+        return cc;
+}
+
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
+{
+        u8 status;
+        int cc;
+
+        if (!static_branch_unlikely(&have_mio))
+                return zpci_load_fh(data, addr, len);
+
+        cc = __pcilg_mio(data, (__force u64) addr, len, &status);
+        if (cc)
+                zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+        return (cc > 0) ? -EIO : cc;
+}
 EXPORT_SYMBOL_GPL(zpci_load);
 
 /* PCI Store */
@@ -178,7 +228,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
         return cc;
 }
 
-int zpci_store(u64 data, u64 req, u64 offset)
+int __zpci_store(u64 data, u64 req, u64 offset)
 {
         u8 status;
         int cc;
@@ -194,6 +244,50 @@ int zpci_store(u64 data, u64 req, u64 offset)
 
         return (cc > 0) ? -EIO : cc;
 }
+EXPORT_SYMBOL_GPL(__zpci_store);
+
+static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
+                                unsigned long len)
+{
+        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+        return __zpci_store(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
+{
+        register u64 addr asm("2") = ioaddr;
+        register u64 r3 asm("3") = len;
+        int cc = -ENXIO;
+
+        asm volatile (
+                "       .insn   rre,0xb9d40000,%[data],%[ioaddr]\n"
+                "0:     ipm     %[cc]\n"
+                "       srl     %[cc],28\n"
+                "1:\n"
+                EX_TABLE(0b, 1b)
+                : [cc] "+d" (cc), "+d" (r3)
+                : [data] "d" (data), [ioaddr] "d" (addr)
+                : "cc");
+        *status = r3 >> 24 & 0xff;
+        return cc;
+}
+
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
+{
+        u8 status;
+        int cc;
+
+        if (!static_branch_unlikely(&have_mio))
+                return zpci_store_fh(addr, data, len);
+
+        cc = __pcistg_mio(data, (__force u64) addr, len, &status);
+        if (cc)
+                zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+        return (cc > 0) ? -EIO : cc;
+}
 EXPORT_SYMBOL_GPL(zpci_store);
 
 /* PCI Store Block */
@@ -214,7 +308,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
         return cc;
 }
 
-int zpci_store_block(const u64 *data, u64 req, u64 offset)
+int __zpci_store_block(const u64 *data, u64 req, u64 offset)
 {
         u8 status;
         int cc;
@@ -230,4 +324,63 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset)
 
         return (cc > 0) ? -EIO : cc;
 }
-EXPORT_SYMBOL_GPL(zpci_store_block);
+EXPORT_SYMBOL_GPL(__zpci_store_block);
+
+static inline int zpci_write_block_fh(volatile void __iomem *dst,
+                                      const void *src, unsigned long len)
+{
+        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+        u64 offset = ZPCI_OFFSET(dst);
+
+        return __zpci_store_block(src, req, offset);
+}
+
+static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+        int cc = -ENXIO;
+
+        asm volatile (
+                "       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
+                "0:     ipm     %[cc]\n"
+                "       srl     %[cc],28\n"
+                "1:\n"
+                EX_TABLE(0b, 1b)
+                : [cc] "+d" (cc), [len] "+d" (len)
+                : [ioaddr] "d" (ioaddr), [data] "Q" (*data)
+                : "cc");
+        *status = len >> 24 & 0xff;
+        return cc;
+}
+
+int zpci_write_block(volatile void __iomem *dst,
+                     const void *src, unsigned long len)
+{
+        u8 status;
+        int cc;
+
+        if (!static_branch_unlikely(&have_mio))
+                return zpci_write_block_fh(dst, src, len);
+
+        cc = __pcistb_mio(src, (__force u64) dst, len, &status);
+        if (cc)
+                zpci_err_insn(cc, status, 0, (__force u64) dst);
+
+        return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_write_block);
+
+static inline void __pciwb_mio(void)
+{
+        unsigned long unused = 0;
+
+        asm volatile (".insn rre,0xb9d50000,%[op],%[op]\n"
+                      : [op] "+d" (unused));
+}
+
+void zpci_barrier(void)
+{
+        if (static_branch_likely(&have_mio))
+                __pciwb_mio();
+}
+EXPORT_SYMBOL_GPL(zpci_barrier);
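
For orientation, a minimal usage sketch of the accessors exported by this change (zpci_load(), zpci_store(), zpci_barrier()): with the have_mio static key enabled they operate directly on the mapped __iomem address, otherwise the same calls fall back internally to the function-handle/offset path (zpci_load_fh()/zpci_store_fh()). The wrapper names demo_mmio_read64()/demo_mmio_write64() and the 8-byte access size are illustrative assumptions, not part of the patch; the __iomem pointer is assumed to come from an earlier pci_iomap() of a PCI BAR.

/* Hypothetical caller-side sketch, not part of the patch. */
#include <linux/types.h>
#include <linux/io.h>
#include <asm/pci_insn.h>

static int demo_mmio_read64(const volatile void __iomem *bar, u64 *val)
{
        /* 8-byte PCI Load from the mapped BAR address; the MIO vs.
         * function-handle decision happens inside zpci_load().
         */
        return zpci_load(val, bar, 8);
}

static int demo_mmio_write64(volatile void __iomem *bar, u64 val)
{
        int rc = zpci_store(bar, val, 8);

        /* Issue the PCI write barrier (PCIWB) when MIO is available;
         * a no-op otherwise, as in zpci_barrier() above.
         */
        zpci_barrier();
        return rc;
}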