From 95832c00bc5ce3ed140f5f9b669803f75ac1431e Mon Sep 17 00:00:00 2001
From: John Youn
Date: Mon, 23 Jan 2017 14:57:26 -0800
Subject: usb: dwc2: Fix usage of bool params

Check these parameters only for true or false. There is no need to
check for greater or less than 0.

Signed-off-by: John Youn
Signed-off-by: Felipe Balbi
---
 drivers/usb/dwc2/hcd.c | 113 ++++++++++++++++++++++++-------------------------
 1 file changed, 56 insertions(+), 57 deletions(-)

(limited to 'drivers/usb/dwc2/hcd.c')

diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index c08c07a46bc6..8b688b041696 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
 	/* Enable the interrupts in the GINTMSK */
 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
 
-	if (hsotg->params.host_dma <= 0)
+	if (!hsotg->params.host_dma)
 		intmsk |= GINTSTS_RXFLVL;
-	if (hsotg->params.external_id_pin_ctl <= 0)
+	if (!hsotg->params.external_id_pin_ctl)
 		intmsk |= GINTSTS_CONIDSTSCHNG;
 
 	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
@@ -100,7 +100,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
 
 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
-	     hsotg->params.ulpi_fs_ls > 0) ||
+	     hsotg->params.ulpi_fs_ls) ||
 	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
 		/* Full speed PHY */
 		val = HCFG_FSLSPCLKSEL_48_MHZ;
@@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 	if (dwc2_is_host_mode(hsotg))
 		dwc2_init_fs_ls_pclk_sel(hsotg);
 
-	if (hsotg->params.i2c_enable > 0) {
+	if (hsotg->params.i2c_enable) {
 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
 
 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
@@ -195,7 +195,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
-		if (hsotg->params.phy_ulpi_ddr > 0)
+		if (hsotg->params.phy_ulpi_ddr)
 			usbcfg |= GUSBCFG_DDRSEL;
 		break;
 	case DWC2_PHY_TYPE_PARAM_UTMI:
@@ -246,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 
 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
-	    hsotg->params.ulpi_fs_ls > 0) {
+	    hsotg->params.ulpi_fs_ls) {
 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
 		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
 		usbcfg |= GUSBCFG_ULPI_FS_LS;
@@ -290,17 +290,17 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
 		hsotg->params.host_dma,
 		hsotg->params.dma_desc_enable);
 
-	if (hsotg->params.host_dma > 0) {
-		if (hsotg->params.dma_desc_enable > 0)
+	if (hsotg->params.host_dma) {
+		if (hsotg->params.dma_desc_enable)
 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
 		else
 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
 	} else {
 		dev_dbg(hsotg->dev, "Using Slave mode\n");
-		hsotg->params.dma_desc_enable = 0;
+		hsotg->params.dma_desc_enable = false;
 	}
 
-	if (hsotg->params.host_dma > 0)
+	if (hsotg->params.host_dma)
 		ahbcfg |= GAHBCFG_DMA_EN;
 
 	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
@@ -491,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
 		dwc2_readl(hsotg->regs + HPTXFSIZ));
 
-	if (hsotg->params.en_multiple_tx_fifo > 0 &&
+	if (hsotg->params.en_multiple_tx_fifo &&
 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
 		/*
 		 * Global DFIFOCFG calculation for Host mode -
@@ -771,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
 	 * For Descriptor DMA mode core halts the channel on AHB error.
 	 * Interrupt is not required.
 	 */
-	if (hsotg->params.dma_desc_enable <= 0) {
+	if (!hsotg->params.dma_desc_enable) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
 		hcintmsk |= HCINTMSK_AHBERR;
@@ -804,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
 {
 	u32 intmsk;
 
-	if (hsotg->params.host_dma > 0) {
+	if (hsotg->params.host_dma) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "DMA enabled\n");
 		dwc2_hc_enable_dma_ints(hsotg, chan);
@@ -1024,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 
 	/* No need to set the bit in DDMA for disabling the channel */
 	/* TODO check it everywhere channel is disabled */
-	if (hsotg->params.dma_desc_enable <= 0) {
+	if (!hsotg->params.dma_desc_enable) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
 		hcchar |= HCCHAR_CHENA;
@@ -1034,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 	}
 	hcchar |= HCCHAR_CHDIS;
 
-	if (hsotg->params.host_dma <= 0) {
+	if (!hsotg->params.host_dma) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
 		hcchar |= HCCHAR_CHENA;
@@ -1380,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
 
 	if (chan->do_ping) {
-		if (hsotg->params.host_dma <= 0) {
+		if (!hsotg->params.host_dma) {
 			if (dbg_hc(chan))
 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
 			dwc2_hc_do_ping(hsotg, chan);
@@ -1508,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 			  TSIZ_SC_MC_PID_SHIFT);
 	}
 
-	if (hsotg->params.host_dma > 0) {
+	if (hsotg->params.host_dma) {
 		dwc2_writel((u32)chan->xfer_dma, hsotg->regs +
 			    HCDMA(chan->hc_num));
 		if (dbg_hc(chan))
@@ -1551,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 	chan->xfer_started = 1;
 	chan->requests++;
 
-	if (hsotg->params.host_dma <= 0 &&
+	if (!hsotg->params.host_dma &&
 	    !chan->ep_is_in && chan->xfer_len > 0)
 		/* Load OUT packet into the appropriate Tx FIFO */
 		dwc2_hc_write_packet(hsotg, chan);
@@ -1834,7 +1834,7 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
 	u32 hcchar;
 	int i;
 
-	if (hsotg->params.host_dma <= 0) {
+	if (!hsotg->params.host_dma) {
 		/* Flush out any channel requests in slave mode */
 		for (i = 0; i < num_channels; i++) {
 			channel = hsotg->hc_ptr_array[i];
@@ -1870,7 +1870,7 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
 		channel->qh = NULL;
 	}
 	/* All channels have been freed, mark them available */
-	if (hsotg->params.uframe_sched > 0) {
+	if (hsotg->params.uframe_sched) {
 		hsotg->available_host_channels =
 			hsotg->params.host_channels;
 	} else {
@@ -2107,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
 	 * Free the QTD and clean up the associated QH. Leave the QH in the
 	 * schedule if it has any remaining QTDs.
 	 */
-	if (hsotg->params.dma_desc_enable <= 0) {
+	if (!hsotg->params.dma_desc_enable) {
 		u8 in_process = urb_qtd->in_process;
 
 		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
@@ -2215,13 +2215,12 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
 
 	/* Set ULPI External VBUS bit if needed */
 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
-	if (hsotg->params.phy_ulpi_ext_vbus ==
-	    DWC2_PHY_ULPI_EXTERNAL_VBUS)
+	if (hsotg->params.phy_ulpi_ext_vbus)
 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
 
 	/* Set external TS Dline pulsing bit if needed */
 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
-	if (hsotg->params.ts_dline > 0)
+	if (hsotg->params.ts_dline)
 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
 
 	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
@@ -2316,13 +2315,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 	 * runtime. This bit needs to be programmed during initial configuration
 	 * and its value must not be changed during runtime.
	 */
-	if (hsotg->params.reload_ctl > 0) {
+	if (hsotg->params.reload_ctl) {
 		hfir = dwc2_readl(hsotg->regs + HFIR);
 		hfir |= HFIR_RLDCTRL;
 		dwc2_writel(hfir, hsotg->regs + HFIR);
 	}
 
-	if (hsotg->params.dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable) {
 		u32 op_mode = hsotg->hw_params.op_mode;
 
 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
@@ -2334,7 +2333,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 				"Hardware does not support descriptor DMA mode -\n");
 			dev_err(hsotg->dev,
 				"falling back to buffer DMA mode.\n");
-			hsotg->params.dma_desc_enable = 0;
+			hsotg->params.dma_desc_enable = false;
 		} else {
 			hcfg = dwc2_readl(hsotg->regs + HCFG);
 			hcfg |= HCFG_DESCDMA;
@@ -2360,7 +2359,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
 	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
 
-	if (hsotg->params.dma_desc_enable <= 0) {
+	if (!hsotg->params.dma_desc_enable) {
 		int num_channels, i;
 		u32 hcchar;
 
@@ -2427,7 +2426,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
 	hsotg->flags.d32 = 0;
 	hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
 
-	if (hsotg->params.uframe_sched > 0) {
+	if (hsotg->params.uframe_sched) {
 		hsotg->available_host_channels =
 			hsotg->params.host_channels;
 	} else {
@@ -2485,7 +2484,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 			chan->do_ping = 0;
 			chan->ep_is_in = 0;
 			chan->data_pid_start = DWC2_HC_PID_SETUP;
-			if (hsotg->params.host_dma > 0)
+			if (hsotg->params.host_dma)
 				chan->xfer_dma = urb->setup_dma;
 			else
 				chan->xfer_buf = urb->setup_packet;
@@ -2512,7 +2511,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 			chan->do_ping = 0;
 			chan->data_pid_start = DWC2_HC_PID_DATA1;
 			chan->xfer_len = 0;
-			if (hsotg->params.host_dma > 0)
+			if (hsotg->params.host_dma)
 				chan->xfer_dma = hsotg->status_buf_dma;
 			else
 				chan->xfer_buf = hsotg->status_buf;
@@ -2530,13 +2529,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 
 	case USB_ENDPOINT_XFER_ISOC:
 		chan->ep_type = USB_ENDPOINT_XFER_ISOC;
-		if (hsotg->params.dma_desc_enable > 0)
+		if (hsotg->params.dma_desc_enable)
 			break;
 
 		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
 		frame_desc->status = 0;
 
-		if (hsotg->params.host_dma > 0) {
+		if (hsotg->params.host_dma) {
 			chan->xfer_dma = urb->dma;
 			chan->xfer_dma += frame_desc->offset +
 					qtd->isoc_split_offset;
@@ -2718,7 +2717,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	    !dwc2_hcd_is_pipe_in(&urb->pipe_info))
 		urb->actual_length = urb->length;
 
-	if (hsotg->params.host_dma > 0)
+	if (hsotg->params.host_dma)
 		chan->xfer_dma = urb->dma + urb->actual_length;
 	else
 		chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
@@ -2743,7 +2742,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	 */
 	chan->multi_count = dwc2_hb_mult(qh->maxp);
 
-	if (hsotg->params.dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable) {
 		chan->desc_list_addr = qh->desc_list_dma;
 		chan->desc_list_sz = qh->desc_list_sz;
 	}
@@ -2780,7 +2779,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 	while (qh_ptr != &hsotg->periodic_sched_ready) {
 		if (list_empty(&hsotg->free_hc_list))
 			break;
-		if (hsotg->params.uframe_sched > 0) {
+		if (hsotg->params.uframe_sched) {
 			if (hsotg->available_host_channels <= 1)
 				break;
 			hsotg->available_host_channels--;
@@ -2807,14 +2806,14 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 	num_channels = hsotg->params.host_channels;
 	qh_ptr = hsotg->non_periodic_sched_inactive.next;
 	while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
-		if (hsotg->params.uframe_sched <= 0 &&
+		if (!hsotg->params.uframe_sched &&
 		    hsotg->non_periodic_channels >= num_channels -
 						hsotg->periodic_channels)
 			break;
 		if (list_empty(&hsotg->free_hc_list))
 			break;
 		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
-		if (hsotg->params.uframe_sched > 0) {
+		if (hsotg->params.uframe_sched) {
 			if (hsotg->available_host_channels < 1)
 				break;
 			hsotg->available_host_channels--;
@@ -2836,7 +2835,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 		else
 			ret_val = DWC2_TRANSACTION_ALL;
 
-		if (hsotg->params.uframe_sched <= 0)
+		if (!hsotg->params.uframe_sched)
 			hsotg->non_periodic_channels++;
 	}
 
@@ -2875,8 +2874,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
 	list_move_tail(&chan->split_order_list_entry,
 		       &hsotg->split_order);
 
-	if (hsotg->params.host_dma > 0) {
-		if (hsotg->params.dma_desc_enable > 0) {
+	if (hsotg->params.host_dma) {
+		if (hsotg->params.dma_desc_enable) {
 			if (!chan->xfer_started ||
 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
 				dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
@@ -2985,7 +2984,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 		 * The flag prevents any halts to get into the request queue in
 		 * the middle of multiple high-bandwidth packets getting queued.
 		 */
-		if (hsotg->params.host_dma <= 0 &&
+		if (!hsotg->params.host_dma &&
 		    qh->channel->multi_count > 1)
 			hsotg->queuing_high_bandwidth = 1;
 
@@ -3004,7 +3003,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 		 * controller automatically handles multiple packets for
 		 * high-bandwidth transfers.
 		 */
-		if (hsotg->params.host_dma > 0 || status == 0 ||
+		if (hsotg->params.host_dma || status == 0 ||
 		    qh->channel->requests == qh->channel->multi_count) {
 			qh_ptr = qh_ptr->next;
 			/*
@@ -3021,7 +3020,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 
exit:
 	if (no_queue_space || no_fifo_space ||
-	    (hsotg->params.host_dma <= 0 &&
+	    (!hsotg->params.host_dma &&
 	     !list_empty(&hsotg->periodic_sched_assigned))) {
 		/*
 		 * May need to queue more transactions as the request
@@ -3101,7 +3100,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
 		tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
 		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
 			    TXSTS_QSPCAVAIL_SHIFT;
-		if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
+		if (!hsotg->params.host_dma && qspcavail == 0) {
 			no_queue_space = 1;
 			break;
 		}
@@ -3134,7 +3133,7 @@ next:
 			hsotg->non_periodic_qh_ptr->next;
 	} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
 
-	if (hsotg->params.host_dma <= 0) {
+	if (!hsotg->params.host_dma) {
 		tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
 		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
 			    TXSTS_QSPCAVAIL_SHIFT;
@@ -3611,7 +3610,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
 			u32 hcfg;
 
 			dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
-			hsotg->params.dma_desc_enable = 1;
+			hsotg->params.dma_desc_enable = true;
 			hcfg = dwc2_readl(hsotg->regs + HCFG);
 			hcfg |= HCFG_DESCDMA;
 			dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -4912,7 +4911,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
 		}
 	}
 
-	if (hsotg->params.host_dma > 0) {
+	if (hsotg->params.host_dma) {
 		if (hsotg->status_buf) {
 			dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
 					  hsotg->status_buf,
@@ -4992,16 +4991,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	hsotg->last_frame_num = HFNUM_MAX_FRNUM;
 
 	/* Check if the bus driver or platform code has setup a dma_mask */
-	if (hsotg->params.host_dma > 0 &&
+	if (hsotg->params.host_dma &&
 	    !hsotg->dev->dma_mask) {
 		dev_warn(hsotg->dev,
 			 "dma_mask not set, disabling DMA\n");
 		hsotg->params.host_dma = false;
-		hsotg->params.dma_desc_enable = 0;
+		hsotg->params.dma_desc_enable = false;
 	}
 
 	/* Set device flags indicating whether the HCD supports DMA */
-	if (hsotg->params.host_dma > 0) {
+	if (hsotg->params.host_dma) {
 		if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
 			dev_warn(hsotg->dev, "can't set DMA mask\n");
 		if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
@@ -5012,7 +5011,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	if (!hcd)
 		goto error1;
 
-	if (hsotg->params.host_dma <= 0)
+	if (!hsotg->params.host_dma)
 		hcd->self.uses_dma = 0;
 
 	hcd->has_tt = 1;
@@ -5084,7 +5083,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	 * done after usb_add_hcd since that function allocates the DMA buffer
 	 * pool.
 	 */
-	if (hsotg->params.host_dma > 0)
+	if (hsotg->params.host_dma)
 		hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
 					DWC2_HCD_STATUS_BUF_SIZE,
 					&hsotg->status_buf_dma, GFP_KERNEL);
@@ -5114,8 +5113,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 			 * Disable descriptor dma mode since it will not be
 			 * usable.
 			 */
-			hsotg->params.dma_desc_enable = 0;
-			hsotg->params.dma_desc_fs_enable = 0;
+			hsotg->params.dma_desc_enable = false;
+			hsotg->params.dma_desc_fs_enable = false;
 		}
 
 		hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
@@ -5131,8 +5130,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 			 * Disable descriptor dma mode since it will not be
 			 * usable.
 			 */
-			hsotg->params.dma_desc_enable = 0;
-			hsotg->params.dma_desc_fs_enable = 0;
+			hsotg->params.dma_desc_enable = false;
+			hsotg->params.dma_desc_fs_enable = false;
 		}
 	}
 
-- 
cgit v1.2.3
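
Note (not part of the patch): the fields touched above (host_dma, dma_desc_enable, dma_desc_fs_enable, uframe_sched, and the rest) are declared as bool in the driver's parameter structure, so the idiomatic form is to test them directly and assign true/false rather than compare against 0. A minimal, self-contained sketch of the same pattern, using a hypothetical params struct rather than the actual dwc2 code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical parameter block mirroring the bool fields changed by the patch. */
struct params {
	bool host_dma;
	bool dma_desc_enable;
};

static void report_dma_mode(struct params *p)
{
	/* Test bool params directly instead of using "> 0" / "<= 0". */
	if (p->host_dma) {
		if (p->dma_desc_enable)
			printf("Using Descriptor DMA mode\n");
		else
			printf("Using Buffer DMA mode\n");
	} else {
		printf("Using Slave mode\n");
		/* Assign false rather than 0 when forcing a bool param off. */
		p->dma_desc_enable = false;
	}
}

int main(void)
{
	struct params p = { .host_dma = false, .dma_desc_enable = true };

	report_dma_mode(&p);	/* prints "Using Slave mode" and clears dma_desc_enable */
	return 0;
}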