Diffstat (limited to 'drivers/rapidio')
-rw-r--r-- | drivers/rapidio/devices/tsi721.c | 122
-rw-r--r-- | drivers/rapidio/devices/tsi721.h | 15
-rw-r--r-- | drivers/rapidio/rio-scan.c | 335
-rw-r--r-- | drivers/rapidio/rio.c | 95
4 files changed, 437 insertions, 130 deletions
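
The headline change is a generic inbound (RapidIO-to-PCIe) memory-mapping API: rio.c gains rio_map_inb_region()/rio_unmap_inb_region(), dispatched through new map_inb/unmap_inb mport operations that tsi721.c implements on top of its SR2PC inbound windows, while rio-scan.c replaces the global next_destid counter with a per-network destID bitmap and moves discovery onto a workqueue. As orientation before the diff, here is a hedged usage sketch of the new inbound API from a hypothetical endpoint driver; the demo_* names, the dma_dev argument, and the chosen RIO base/size are illustrative assumptions, and the public declarations are expected to live in the RapidIO headers outside this diffstat. Note that the Tsi721 implementation only accepts a power-of-two size of at least 4 KB, with both the local and RIO addresses aligned to that size.

/*
 * Illustrative sketch only (not part of this patch): expose a 64 KB
 * DMA-coherent buffer to the RapidIO fabric through the new inbound
 * mapping API.  The demo_* names, the RIO base address and the dma_dev
 * argument are assumptions made for this example.
 */
#include <linux/dma-mapping.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

#define DEMO_IBW_SIZE	0x10000		/* power of two, >= 4 KB (tsi721 rule) */
#define DEMO_RIO_BASE	0x10000ULL	/* must be DEMO_IBW_SIZE-aligned */

static void *demo_buf;
static dma_addr_t demo_dma;

/* dma_dev is whatever device performs the DMA, e.g. the Tsi721 PCIe function */
static int demo_expose_buffer(struct rio_mport *mport, struct device *dma_dev)
{
	int rc;

	/*
	 * dma_alloc_coherent() gives at least page alignment; power-of-two
	 * allocations normally come back size-aligned, as the window needs.
	 */
	demo_buf = dma_alloc_coherent(dma_dev, DEMO_IBW_SIZE, &demo_dma,
				      GFP_KERNEL);
	if (!demo_buf)
		return -ENOMEM;

	/* RIO accesses to [DEMO_RIO_BASE, +64K) now land in demo_buf */
	rc = rio_map_inb_region(mport, demo_dma, DEMO_RIO_BASE,
				DEMO_IBW_SIZE, 0);
	if (rc) {
		dma_free_coherent(dma_dev, DEMO_IBW_SIZE, demo_buf, demo_dma);
		return rc;
	}
	return 0;
}

static void demo_release_buffer(struct rio_mport *mport, struct device *dma_dev)
{
	rio_unmap_inb_region(mport, demo_dma);
	dma_free_coherent(dma_dev, DEMO_IBW_SIZE, demo_buf, demo_dma);
}
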
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 722246cf20ab..38ecd8f4d60e 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work)
 				" info %4.4x\n", DBELL_SID(idb.bytes),
 				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
 		}
+
+		wr_ptr = ioread32(priv->regs +
+				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
 	}
 
 	iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work)
 	regval |= TSI721_SR_CHINT_IDBQRCV;
 	iowrite32(regval,
 		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	if (wr_ptr != rd_ptr)
+		schedule_work(&priv->idb_work);
 }
 
 /**
@@ -855,6 +862,90 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
 }
 
 /**
+ * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ * @rstart: RapidIO space start address.
+ * @size: The mapping region size.
+ * @flags: Flags for mapping. 0 for using default flags.
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the inbound mapping
+ * from rstart to lstart.
+ */
+static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+		u64 rstart, u32 size, u32 flags)
+{
+	struct tsi721_device *priv = mport->priv;
+	int i;
+	u32 regval;
+
+	if (!is_power_of_2(size) || size < 0x1000 ||
+	    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
+		return -EINVAL;
+
+	/* Search for free inbound translation window */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
+		if (!(regval & TSI721_IBWIN_LB_WEN))
+			break;
+	}
+
+	if (i >= TSI721_IBWIN_NUM) {
+		dev_err(&priv->pdev->dev,
+			"Unable to find free inbound window\n");
+		return -EBUSY;
+	}
+
+	iowrite32(TSI721_IBWIN_SIZE(size) << 8,
+		priv->regs + TSI721_IBWIN_SZ(i));
+
+	iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i));
+	iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD),
+		  priv->regs + TSI721_IBWIN_TLA(i));
+
+	iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i));
+	iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
+		  priv->regs + TSI721_IBWIN_LB(i));
+	dev_dbg(&priv->pdev->dev,
+		"Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n",
+		i, rstart, (unsigned long long)lstart);
+
+	return 0;
+}
+
+/**
+ * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ */
+static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
+				dma_addr_t lstart)
+{
+	struct tsi721_device *priv = mport->priv;
+	int i;
+	u64 addr;
+	u32 regval;
+
+	/* Search for matching active inbound translation window */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
+		if (regval & TSI721_IBWIN_LB_WEN) {
+			regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i));
+			addr = (u64)regval << 32;
+			regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i));
+			addr |= regval & TSI721_IBWIN_TLA_ADD;
+
+			if (addr == (u64)lstart) {
+				iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+				break;
+			}
+		}
+	}
+}
+
+/**
  * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
  * translation regions.
* @priv: pointer to tsi721 private data @@ -867,7 +958,7 @@ static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) /* Disable all SR2PC inbound windows */ for (i = 0; i < TSI721_IBWIN_NUM; i++) - iowrite32(0, priv->regs + TSI721_IBWINLB(i)); + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); } /** @@ -2137,6 +2228,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) ops->add_outb_message = tsi721_add_outb_message; ops->add_inb_buffer = tsi721_add_inb_buffer; ops->get_inb_message = tsi721_get_inb_message; + ops->map_inb = tsi721_rio_map_inb_mem; + ops->unmap_inb = tsi721_rio_unmap_inb_mem; mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!mport) { @@ -2158,7 +2251,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); - strcpy(mport->name, "Tsi721 mport"); + snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", + dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); /* Hook up interrupt handler */ @@ -2212,9 +2306,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i, cap; int err; - u32 regval; priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); if (priv == NULL) { @@ -2232,12 +2324,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, priv->pdev = pdev; #ifdef DEBUG + { + int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", i, (unsigned long long)pci_resource_start(pdev, i), (unsigned long)pci_resource_len(pdev, i), pci_resource_flags(pdev, i)); } + } #endif /* * Verify BAR configuration @@ -2307,7 +2402,8 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, /* Configure DMA attributes. */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { dev_info(&pdev->dev, "Unable to set DMA mask\n"); goto err_unmap_bars; } @@ -2320,20 +2416,16 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); } - cap = pci_pcie_cap(pdev); - BUG_ON(cap == 0); + BUG_ON(!pci_is_pcie(pdev)); /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ - pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, ®val); - regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | - PCI_EXP_DEVCTL_NOSNOOP_EN); - regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; - pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN, + 0x2 << MAX_READ_REQUEST_SZ_SHIFT); /* Adjust PCIe completion timeout. 
 */
-	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
-	regval &= ~(0x0f);
-	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
+	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
 
 	/*
 	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 59de9d7be346..7d5b13ba8d4f 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -156,9 +156,18 @@
 
 #define TSI721_IBWIN_NUM	8
 
-#define TSI721_IBWINLB(x)	(0x29000 + (x) * 0x20)
-#define TSI721_IBWINLB_BA	0xfffff000
-#define TSI721_IBWINLB_WEN	0x00000001
+#define TSI721_IBWIN_LB(x)	(0x29000 + (x) * 0x20)
+#define TSI721_IBWIN_LB_BA	0xfffff000
+#define TSI721_IBWIN_LB_WEN	0x00000001
+
+#define TSI721_IBWIN_UB(x)	(0x29004 + (x) * 0x20)
+#define TSI721_IBWIN_SZ(x)	(0x29008 + (x) * 0x20)
+#define TSI721_IBWIN_SZ_SIZE	0x00001f00
+#define TSI721_IBWIN_SIZE(size)	(__fls(size) - 12)
+
+#define TSI721_IBWIN_TLA(x)	(0x2900c + (x) * 0x20)
+#define TSI721_IBWIN_TLA_ADD	0xfffff000
+#define TSI721_IBWIN_TUA(x)	(0x29010 + (x) * 0x20)
 
 #define TSI721_SR2PC_GEN_INTE	0x29800
 #define TSI721_SR2PC_PWE	0x29804
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 2bebd791a092..48e9041dd1e2 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -31,27 +31,21 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
+#include <linux/sched.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
 
 #include "rio.h"
 
 LIST_HEAD(rio_devices);
-static LIST_HEAD(rio_switches);
-
-static void rio_enum_timeout(unsigned long);
 
 static void rio_init_em(struct rio_dev *rdev);
 
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_net = 0;
 static int next_comptag = 1;
 
-static struct timer_list rio_enum_timer =
-TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
-
 static int rio_mport_phys_table[] = {
 	RIO_EFB_PAR_EP_ID,
 	RIO_EFB_PAR_EP_REC_ID,
@@ -60,6 +54,114 @@ static int rio_mport_phys_table[] = {
 	-1,
 };
 
+
+/*
+ * rio_destid_alloc - Allocate next available destID for given network
+ * net: RIO network
+ *
+ * Returns next available device destination ID for the specified RIO network.
+ * Marks the allocated ID as one in use.
+ * Returns RIO_INVALID_DESTID if a new destID is not available.
+ */
+static u16 rio_destid_alloc(struct rio_net *net)
+{
+	int destid;
+	struct rio_id_table *idtab = &net->destid_table;
+
+	spin_lock(&idtab->lock);
+	destid = find_next_zero_bit(idtab->table, idtab->max, idtab->next);
+	if (destid >= idtab->max)
+		destid = find_first_zero_bit(idtab->table, idtab->max);
+
+	if (destid < idtab->max) {
+		idtab->next = destid + 1;
+		if (idtab->next >= idtab->max)
+			idtab->next = 0;
+		set_bit(destid, idtab->table);
+		destid += idtab->start;
+	} else
+		destid = RIO_INVALID_DESTID;
+
+	spin_unlock(&idtab->lock);
+	return (u16)destid;
+}
+
+/*
+ * rio_destid_reserve - Reserve the specified destID
+ * net: RIO network
+ * destid: destID to reserve
+ *
+ * Tries to reserve the specified destID.
+ * Returns 0 if successful.
+ */ +static int rio_destid_reserve(struct rio_net *net, u16 destid) +{ + int oldbit; + struct rio_id_table *idtab = &net->destid_table; + + destid -= idtab->start; + spin_lock(&idtab->lock); + oldbit = test_and_set_bit(destid, idtab->table); + spin_unlock(&idtab->lock); + return oldbit; +} + +/* + * rio_destid_free - free a previously allocated destID + * net: RIO network + * destid: destID to free + * + * Makes the specified destID available for use. + */ +static void rio_destid_free(struct rio_net *net, u16 destid) +{ + struct rio_id_table *idtab = &net->destid_table; + + destid -= idtab->start; + spin_lock(&idtab->lock); + clear_bit(destid, idtab->table); + spin_unlock(&idtab->lock); +} + +/* + * rio_destid_first - return first destID in use + * net: RIO network + */ +static u16 rio_destid_first(struct rio_net *net) +{ + int destid; + struct rio_id_table *idtab = &net->destid_table; + + spin_lock(&idtab->lock); + destid = find_first_bit(idtab->table, idtab->max); + if (destid >= idtab->max) + destid = RIO_INVALID_DESTID; + else + destid += idtab->start; + spin_unlock(&idtab->lock); + return (u16)destid; +} + +/* + * rio_destid_next - return next destID in use + * net: RIO network + * from: destination ID from which search shall continue + */ +static u16 rio_destid_next(struct rio_net *net, u16 from) +{ + int destid; + struct rio_id_table *idtab = &net->destid_table; + + spin_lock(&idtab->lock); + destid = find_next_bit(idtab->table, idtab->max, from); + if (destid >= idtab->max) + destid = RIO_INVALID_DESTID; + else + destid += idtab->start; + spin_unlock(&idtab->lock); + return (u16)destid; +} + /** * rio_get_device_id - Get the base/extended device id for a device * @port: RIO master port @@ -108,14 +210,15 @@ static void rio_local_set_device_id(struct rio_mport *port, u16 did) /** * rio_clear_locks- Release all host locks and signal enumeration complete - * @port: Master port to issue transaction + * @net: RIO network to run on * * Marks the component tag CSR on each device with the enumeration * complete flag. When complete, it then release the host locks on * each device. Returns 0 on success or %-EINVAL on failure. 
*/ -static int rio_clear_locks(struct rio_mport *port) +static int rio_clear_locks(struct rio_net *net) { + struct rio_mport *port = net->hport; struct rio_dev *rdev; u32 result; int ret = 0; @@ -130,7 +233,7 @@ static int rio_clear_locks(struct rio_mport *port) result); ret = -EINVAL; } - list_for_each_entry(rdev, &rio_devices, global_list) { + list_for_each_entry(rdev, &net->devices, net_list) { rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); @@ -176,10 +279,6 @@ static int rio_enum_host(struct rio_mport *port) /* Set master port destid and init destid ctr */ rio_local_set_device_id(port, port->host_deviceid); - - if (next_destid == port->host_deviceid) - next_destid++; - return 0; } @@ -446,9 +545,8 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { if (do_enum) { rio_set_device_id(port, destid, hopcount, next_destid); - rdev->destid = next_destid++; - if (next_destid == port->host_deviceid) - next_destid++; + rdev->destid = next_destid; + next_destid = rio_destid_alloc(net); } else rdev->destid = rio_get_device_id(port, destid, hopcount); @@ -483,7 +581,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rswitch->clr_table(port, destid, hopcount, RIO_GLOBAL_TABLE); - list_add_tail(&rswitch->node, &rio_switches); + list_add_tail(&rswitch->node, &net->switches); } else { if (do_enum) @@ -747,12 +845,7 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount) static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, u8 hopcount, struct rio_dev *prev, int prev_port) { - int port_num; - int cur_destid; - int sw_destid; - int sw_inport; struct rio_dev *rdev; - u16 destid; u32 regval; int tmp; @@ -818,19 +911,26 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, return -1; if (rio_is_switch(rdev)) { + int sw_destid; + int cur_destid; + int sw_inport; + u16 destid; + int port_num; + sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, port->host_deviceid, sw_inport, 0); rdev->rswitch->route_table[port->host_deviceid] = sw_inport; - for (destid = 0; destid < next_destid; destid++) { - if (destid == port->host_deviceid) - continue; - rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, - destid, sw_inport, 0); - rdev->rswitch->route_table[destid] = sw_inport; + destid = rio_destid_first(net); + while (destid != RIO_INVALID_DESTID && destid < next_destid) { + if (destid != port->host_deviceid) { + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, + destid, sw_inport, 0); + rdev->rswitch->route_table[destid] = sw_inport; + } + destid = rio_destid_next(net, destid + 1); } - pr_debug( "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", rio_name(rdev), rdev->vid, rdev->did, @@ -839,12 +939,10 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, for (port_num = 0; port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); port_num++) { - /*Enable Input Output Port (transmitter reviever)*/ - rio_enable_rx_tx_port(port, 0, + if (sw_inport == port_num) { + rio_enable_rx_tx_port(port, 0, RIO_ANY_DESTID(port->sys_size), hopcount, port_num); - - if (sw_inport == port_num) { rdev->rswitch->port_ok |= (1 << port_num); continue; } @@ -857,6 +955,9 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, pr_debug( "RIO: scanning device on port %d\n", port_num); + 
rio_enable_rx_tx_port(port, 0, + RIO_ANY_DESTID(port->sys_size), + hopcount, port_num); rdev->rswitch->port_ok |= (1 << port_num); rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, RIO_ANY_DESTID(port->sys_size), @@ -867,19 +968,22 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, return -1; /* Update routing tables */ - if (next_destid > cur_destid) { + destid = rio_destid_next(net, cur_destid + 1); + if (destid != RIO_INVALID_DESTID) { for (destid = cur_destid; - destid < next_destid; destid++) { - if (destid == port->host_deviceid) - continue; - rio_route_add_entry(rdev, + destid < next_destid;) { + if (destid != port->host_deviceid) { + rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, destid, port_num, 0); - rdev->rswitch-> - route_table[destid] = - port_num; + rdev->rswitch-> + route_table[destid] = + port_num; + } + destid = rio_destid_next(net, + destid + 1); } } } else { @@ -905,11 +1009,8 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, rio_init_em(rdev); /* Check for empty switch */ - if (next_destid == sw_destid) { - next_destid++; - if (next_destid == port->host_deviceid) - next_destid++; - } + if (next_destid == sw_destid) + next_destid = rio_destid_alloc(net); rdev->destid = sw_destid; } else @@ -1047,48 +1148,71 @@ static int rio_mport_is_active(struct rio_mport *port) /** * rio_alloc_net- Allocate and configure a new RIO network * @port: Master port associated with the RIO network + * @do_enum: Enumeration/Discovery mode flag + * @start: logical minimal start id for new net * * Allocates a RIO network structure, initializes per-network * list heads, and adds the associated master port to the * network list of associated master ports. Returns a * RIO network pointer on success or %NULL on failure. */ -static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port) +static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port, + int do_enum, u16 start) { struct rio_net *net; net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); + if (net && do_enum) { + net->destid_table.table = kzalloc( + BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)) * + sizeof(long), + GFP_KERNEL); + + if (net->destid_table.table == NULL) { + pr_err("RIO: failed to allocate destID table\n"); + kfree(net); + net = NULL; + } else { + net->destid_table.start = start; + net->destid_table.next = 0; + net->destid_table.max = + RIO_MAX_ROUTE_ENTRIES(port->sys_size); + spin_lock_init(&net->destid_table.lock); + } + } + if (net) { INIT_LIST_HEAD(&net->node); INIT_LIST_HEAD(&net->devices); + INIT_LIST_HEAD(&net->switches); INIT_LIST_HEAD(&net->mports); list_add_tail(&port->nnode, &net->mports); net->hport = port; - net->id = next_net++; + net->id = port->id; } return net; } /** * rio_update_route_tables- Updates route tables in switches - * @port: Master port associated with the RIO network + * @net: RIO network to run update on * * For each enumerated device, ensure that each switch in a system * has correct routing entries. Add routes for devices that where * unknown dirung the first enumeration pass through the switch. 
*/ -static void rio_update_route_tables(struct rio_mport *port) +static void rio_update_route_tables(struct rio_net *net) { struct rio_dev *rdev, *swrdev; struct rio_switch *rswitch; u8 sport; u16 destid; - list_for_each_entry(rdev, &rio_devices, global_list) { + list_for_each_entry(rdev, &net->devices, net_list) { destid = rdev->destid; - list_for_each_entry(rswitch, &rio_switches, node) { + list_for_each_entry(rswitch, &net->switches, node) { if (rio_is_switch(rdev) && (rdev->rswitch == rswitch)) continue; @@ -1166,12 +1290,16 @@ int __devinit rio_enum_mport(struct rio_mport *mport) /* If master port has an active link, allocate net and enum peers */ if (rio_mport_is_active(mport)) { - if (!(net = rio_alloc_net(mport))) { + net = rio_alloc_net(mport, 1, 0); + if (!net) { printk(KERN_ERR "RIO: failed to allocate new net\n"); rc = -ENOMEM; goto out; } + /* reserve mport destID in new net */ + rio_destid_reserve(net, mport->host_deviceid); + /* Enable Input Output Port (transmitter reviever) */ rio_enable_rx_tx_port(mport, 1, 0, 0, 0); @@ -1179,17 +1307,21 @@ int __devinit rio_enum_mport(struct rio_mport *mport) rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, next_comptag++); + next_destid = rio_destid_alloc(net); + if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) { /* A higher priority host won enumeration, bail. */ printk(KERN_INFO "RIO: master port %d device has lost enumeration to a remote host\n", mport->id); - rio_clear_locks(mport); + rio_clear_locks(net); rc = -EBUSY; goto out; } - rio_update_route_tables(mport); - rio_clear_locks(mport); + /* free the last allocated destID (unused) */ + rio_destid_free(net, next_destid); + rio_update_route_tables(net); + rio_clear_locks(net); rio_pw_enable(mport, 1); } else { printk(KERN_INFO "RIO: master port %d link inactive\n", @@ -1203,47 +1335,34 @@ int __devinit rio_enum_mport(struct rio_mport *mport) /** * rio_build_route_tables- Generate route tables from switch route entries + * @net: RIO network to run route tables scan on * * For each switch device, generate a route table by copying existing * route entries from the switch. */ -static void rio_build_route_tables(void) +static void rio_build_route_tables(struct rio_net *net) { + struct rio_switch *rswitch; struct rio_dev *rdev; int i; u8 sport; - list_for_each_entry(rdev, &rio_devices, global_list) - if (rio_is_switch(rdev)) { - rio_lock_device(rdev->net->hport, rdev->destid, - rdev->hopcount, 1000); - for (i = 0; - i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); - i++) { - if (rio_route_get_entry(rdev, - RIO_GLOBAL_TABLE, i, &sport, 0) < 0) - continue; - rdev->rswitch->route_table[i] = sport; - } + list_for_each_entry(rswitch, &net->switches, node) { + rdev = sw_to_rio_dev(rswitch); - rio_unlock_device(rdev->net->hport, - rdev->destid, - rdev->hopcount); + rio_lock_device(net->hport, rdev->destid, + rdev->hopcount, 1000); + for (i = 0; + i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size); + i++) { + if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, + i, &sport, 0) < 0) + continue; + rswitch->route_table[i] = sport; } -} -/** - * rio_enum_timeout- Signal that enumeration timed out - * @data: Address of timeout flag. - * - * When the enumeration complete timer expires, set a flag that - * signals to the discovery process that enumeration did not - * complete in a sane amount of time. 
- */ -static void rio_enum_timeout(unsigned long data) -{ - /* Enumeration timed out, set flag */ - *(int *)data = 1; + rio_unlock_device(net->hport, rdev->destid, rdev->hopcount); + } } /** @@ -1259,34 +1378,33 @@ static void rio_enum_timeout(unsigned long data) int __devinit rio_disc_mport(struct rio_mport *mport) { struct rio_net *net = NULL; - int enum_timeout_flag = 0; + unsigned long to_end; printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id, mport->name); /* If master port has an active link, allocate net and discover peers */ if (rio_mport_is_active(mport)) { - if (!(net = rio_alloc_net(mport))) { - printk(KERN_ERR "RIO: Failed to allocate new net\n"); - goto bail; - } + pr_debug("RIO: wait for enumeration to complete...\n"); - pr_debug("RIO: wait for enumeration complete..."); - - rio_enum_timer.expires = - jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; - rio_enum_timer.data = (unsigned long)&enum_timeout_flag; - add_timer(&rio_enum_timer); - while (!rio_enum_complete(mport)) { - mdelay(1); - if (enum_timeout_flag) { - del_timer_sync(&rio_enum_timer); - goto timeout; - } + to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; + while (time_before(jiffies, to_end)) { + if (rio_enum_complete(mport)) + goto enum_done; + schedule_timeout_uninterruptible(msecs_to_jiffies(10)); } - del_timer_sync(&rio_enum_timer); - pr_debug("done\n"); + pr_debug("RIO: discovery timeout on mport %d %s\n", + mport->id, mport->name); + goto bail; +enum_done: + pr_debug("RIO: ... enumeration done\n"); + + net = rio_alloc_net(mport, 0, 0); + if (!net) { + printk(KERN_ERR "RIO: Failed to allocate new net\n"); + goto bail; + } /* Read DestID assigned by enumerator */ rio_local_read_config_32(mport, RIO_DID_CSR, @@ -1302,13 +1420,10 @@ int __devinit rio_disc_mport(struct rio_mport *mport) goto bail; } - rio_build_route_tables(); + rio_build_route_tables(net); } return 0; - - timeout: - pr_debug("timeout\n"); - bail: +bail: return -EBUSY; } diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c40665a4fa33..d4bd69013c50 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -33,6 +33,7 @@ static LIST_HEAD(rio_mports); static unsigned char next_portid; +static DEFINE_SPINLOCK(rio_mmap_lock); /** * rio_local_get_device_id - Get the base/extended device id for a port @@ -398,6 +399,49 @@ int rio_release_inb_pwrite(struct rio_dev *rdev) EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); /** + * rio_map_inb_region -- Map inbound memory region. + * @mport: Master port. + * @lstart: physical address of memory region to be mapped + * @rbase: RIO base address assigned to this window + * @size: Size of the memory region + * @rflags: Flags for mapping. + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. 
+ */
+int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
+			u64 rbase, u32 size, u32 rflags)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!mport->ops->map_inb)
+		return -1;
+	spin_lock_irqsave(&rio_mmap_lock, flags);
+	rc = mport->ops->map_inb(mport, local, rbase, size, rflags);
+	spin_unlock_irqrestore(&rio_mmap_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_map_inb_region);
+
+/**
+ * rio_unmap_inb_region -- Unmap the inbound memory region
+ * @mport: Master port
+ * @lstart: physical address of memory region to be unmapped
+ */
+void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart)
+{
+	unsigned long flags;
+	if (!mport->ops->unmap_inb)
+		return;
+	spin_lock_irqsave(&rio_mmap_lock, flags);
+	mport->ops->unmap_inb(mport, lstart);
+	spin_unlock_irqrestore(&rio_mmap_lock, flags);
+}
+EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
+
+/**
  * rio_mport_get_physefb - Helper function that returns register offset
  * for Physical Layer Extended Features Block.
  * @port: Master port to issue transaction
@@ -1216,15 +1260,62 @@ static int __devinit rio_init(void)
 	return 0;
 }
 
+static struct workqueue_struct *rio_wq;
+
+struct rio_disc_work {
+	struct work_struct	work;
+	struct rio_mport	*mport;
+};
+
+static void __devinit disc_work_handler(struct work_struct *_work)
+{
+	struct rio_disc_work *work;
+
+	work = container_of(_work, struct rio_disc_work, work);
+	pr_debug("RIO: discovery work for mport %d %s\n",
+		 work->mport->id, work->mport->name);
+	rio_disc_mport(work->mport);
+
+	kfree(work);
+}
+
 int __devinit rio_init_mports(void)
 {
 	struct rio_mport *port;
+	struct rio_disc_work *work;
+	int no_disc = 0;
 
 	list_for_each_entry(port, &rio_mports, node) {
 		if (port->host_deviceid >= 0)
 			rio_enum_mport(port);
-		else
-			rio_disc_mport(port);
+		else if (!no_disc) {
+			if (!rio_wq) {
+				rio_wq = alloc_workqueue("riodisc", 0, 0);
+				if (!rio_wq) {
+					pr_err("RIO: unable allocate rio_wq\n");
+					no_disc = 1;
+					continue;
+				}
+			}
+
+			work = kzalloc(sizeof *work, GFP_KERNEL);
+			if (!work) {
+				pr_err("RIO: no memory for work struct\n");
+				no_disc = 1;
+				continue;
+			}
+
+			work->mport = port;
+			INIT_WORK(&work->work, disc_work_handler);
+			queue_work(rio_wq, &work->work);
+		}
+	}
+
+	if (rio_wq) {
+		pr_debug("RIO: flush discovery workqueue\n");
+		flush_workqueue(rio_wq);
+		pr_debug("RIO: flush discovery workqueue finished\n");
+		destroy_workqueue(rio_wq);
 	}
 
 	rio_init();
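
The per-network destID bitmap added to rio-scan.c (rio_destid_alloc/reserve/free/first/next) replaces the old bumping of a global next_destid counter. To make the allocation policy concrete, the following is a small stand-alone user-space model of it, not kernel code: next-fit search from a moving hint with wrap-around, the host's own destID reserved up front, and RIO_INVALID_DESTID returned when the table is full. Locking is omitted because the model is single-threaded, and the tiny table size is purely for the demo.

/*
 * Stand-alone model (user-space C, illustrative only) of the next-fit,
 * wrap-around destID allocation policy that rio_destid_alloc() implements
 * with a per-network bitmap.
 */
#include <stdio.h>
#include <stdint.h>

#define TABLE_MAX          16           /* tiny table for the demo */
#define RIO_INVALID_DESTID 0xffff

struct id_table {
	uint16_t start;                  /* destID represented by slot 0 */
	unsigned next;                   /* next-fit search hint */
	unsigned char used[TABLE_MAX];   /* 1 = destID in use */
};

static uint16_t destid_alloc(struct id_table *t)
{
	/* search from the hint, wrapping around once, like find_next_zero_bit */
	for (unsigned i = 0; i < TABLE_MAX; i++) {
		unsigned slot = (t->next + i) % TABLE_MAX;

		if (!t->used[slot]) {
			t->used[slot] = 1;
			t->next = (slot + 1) % TABLE_MAX;
			return t->start + slot;
		}
	}
	return RIO_INVALID_DESTID;
}

static void destid_free(struct id_table *t, uint16_t destid)
{
	t->used[destid - t->start] = 0;
}

int main(void)
{
	struct id_table t = { .start = 0, .next = 0 };

	/* reserve the host's own destID (0 here), as rio_enum_mport() does */
	t.used[0] = 1;

	for (int i = 0; i < 4; i++)
		printf("allocated destID %u\n", (unsigned)destid_alloc(&t));

	destid_free(&t, 2);
	printf("after freeing 2, next allocation: %u\n",
	       (unsigned)destid_alloc(&t));
	return 0;
}

Because the hint keeps advancing, the destID freed in the example (2) is not handed out again until the search wraps around, which keeps recently released IDs out of circulation for a while.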