Diffstat (limited to 'drivers/scsi/mpt3sas/mpt3sas_base.c')
 drivers/scsi/mpt3sas/mpt3sas_base.c | 522 ++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 342 insertions(+), 180 deletions(-)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index ac066f86bb14..5779f313f6f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2905,23 +2905,22 @@ static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
 	struct sysinfo s;
-	int dma_mask;
 
 	if (ioc->is_mcpu_endpoint ||
 	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
 	    dma_get_required_mask(&pdev->dev) <= 32)
-		dma_mask = 32;
+		ioc->dma_mask = 32;
 	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
 	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
-		dma_mask = 63;
+		ioc->dma_mask = 63;
 	else
-		dma_mask = 64;
+		ioc->dma_mask = 64;
 
-	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
-	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
+	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
 		return -ENODEV;
 
-	if (dma_mask > 32) {
+	if (ioc->dma_mask > 32) {
 		ioc->base_add_sg_single = &_base_add_sg_single_64;
 		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
 	} else {
@@ -2931,7 +2930,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 
 	si_meminfo(&s);
 	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-		dma_mask, convert_to_kb(s.totalram));
+		ioc->dma_mask, convert_to_kb(s.totalram));
 
 	return 0;
 }
@@ -3678,8 +3677,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
 	 * IOs on the target device is >=8.
 	 */
 
-	if (atomic_read(&scmd->device->device_busy) >
-	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
 		return base_mod64((
 		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
 		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
@@ -4173,7 +4171,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }
 
 /**
- * _base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
  * use Atomic Request Descriptor
  * @ioc: per adapter object
  * @smid: system request message index
@@ -5232,7 +5230,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
  * mpt3sas_free_enclosure_list - release memory
  * @ioc: per adapter object
  *
- * Free memory allocated during encloure add.
+ * Free memory allocated during enclosure add.
  */
 void
 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -5338,10 +5336,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			dma_pool_free(ioc->pcie_sgl_dma_pool,
 					ioc->pcie_sg_lookup[i].pcie_sgl,
 					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
 		}
 		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
 	}
-
 	if (ioc->config_page) {
 		dexitprintk(ioc,
 			    ioc_info(ioc, "config_page(0x%p): free\n",
@@ -5400,6 +5398,271 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
 }
 
 /**
+ * _base_reduce_hba_queue_depth- Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ **/
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+	int reduce_sz = 64;
+
+	if ((ioc->hba_queue_depth - reduce_sz) >
+	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+		ioc->hba_queue_depth -= reduce_sz;
+		return 0;
+	} else
+		return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
+ *			for pcie sgl pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	int i = 0, j = 0;
+	struct chain_tracker *ct;
+
+	ioc->pcie_sgl_dma_pool =
+	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+	    ioc->page_size, 0);
+	if (!ioc->pcie_sgl_dma_pool) {
+		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+		return -ENOMEM;
+	}
+
+	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+	ioc->chains_per_prp_buffer =
+	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+	for (i = 0; i < ioc->scsiio_depth; i++) {
+		ioc->pcie_sg_lookup[i].pcie_sgl =
+		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+			return -EAGAIN;
+		}
+
+		if (!mpt3sas_check_same_4gb_region(
+		    (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+			    ioc->pcie_sg_lookup[i].pcie_sgl,
+			    (unsigned long long)
+			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+			ioc->use_32bit_dma = true;
+			return -EAGAIN;
+		}
+
+		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+			ct = &ioc->chain_lookup[i].chains_per_smid[j];
+			ct->chain_buffer =
+			    ioc->pcie_sg_lookup[i].pcie_sgl +
+			    (j * ioc->chain_segment_sz);
+			ct->chain_buffer_dma =
+			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+			    (j * ioc->chain_segment_sz);
+		}
+	}
+	dinitprintk(ioc, ioc_info(ioc,
+	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+	dinitprintk(ioc, ioc_info(ioc,
+	    "Number of chains can fit in a PRP page(%d)\n",
+	    ioc->chains_per_prp_buffer));
+	return 0;
+}
+
+/**
+ * _base_allocate_chain_dma_pool - Allocating DMA'able memory
+ *			for chain dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	int i = 0, j = 0;
+	struct chain_tracker *ctr;
+
+	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
+	    ioc->chain_segment_sz, 16, 0);
+	if (!ioc->chain_dma_pool)
+		return -ENOMEM;
+
+	for (i = 0; i < ioc->scsiio_depth; i++) {
+		for (j = ioc->chains_per_prp_buffer;
+		    j < ioc->chains_needed_per_io; j++) {
+			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
+			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
+			    GFP_KERNEL, &ctr->chain_buffer_dma);
+			if (!ctr->chain_buffer)
+				return -EAGAIN;
+			if (!mpt3sas_check_same_4gb_region((long)
+			    ctr->chain_buffer, ioc->chain_segment_sz)) {
+				ioc_err(ioc,
+				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
+				    ctr->chain_buffer,
+				    (unsigned long long)ctr->chain_buffer_dma);
+				ioc->use_32bit_dma = true;
+				return -EAGAIN;
+			}
+		}
+	}
+	dinitprintk(ioc, ioc_info(ioc,
+	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
+	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
+	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
+	    ioc->chain_segment_sz))/1024));
+	return 0;
+}
+
+/**
+ * _base_allocate_sense_dma_pool - Allocating DMA'able memory
+ *			for sense dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	ioc->sense_dma_pool =
+	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
+	if (!ioc->sense_dma_pool)
+		return -ENOMEM;
+	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
+	    GFP_KERNEL, &ioc->sense_dma);
+	if (!ioc->sense)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
+		dinitprintk(ioc, pr_err(
+		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
+		    ioc->sense, (unsigned long long) ioc->sense_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	ioc_info(ioc,
+	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
+	    ioc->sense, (unsigned long long)ioc->sense_dma,
+	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
+	return 0;
+}
+
+/**
+ * _base_allocate_reply_pool - Allocating DMA'able memory
+ *			for reply pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	/* reply pool, 4 byte align */
+	ioc->reply_dma_pool = dma_pool_create("reply pool",
+	    &ioc->pdev->dev, sz, 4, 0);
+	if (!ioc->reply_dma_pool)
+		return -ENOMEM;
+	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+	    &ioc->reply_dma);
+	if (!ioc->reply)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+		dinitprintk(ioc, pr_err(
+		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
+		    ioc->reply, (unsigned long long) ioc->reply_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+	ioc_info(ioc,
+	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+	    ioc->reply, (unsigned long long)ioc->reply_dma,
+	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
+	return 0;
+}
+
+/**
+ * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
+ *			for reply free dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	/* reply free queue, 16 byte align */
+	ioc->reply_free_dma_pool = dma_pool_create(
+	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
+	if (!ioc->reply_free_dma_pool)
+		return -ENOMEM;
+	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
+	    GFP_KERNEL, &ioc->reply_free_dma);
+	if (!ioc->reply_free)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+		dinitprintk(ioc,
+		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	memset(ioc->reply_free, 0, sz);
+	dinitprintk(ioc, ioc_info(ioc,
+	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+	dinitprintk(ioc, ioc_info(ioc,
+	    "reply_free_dma (0x%llx)\n",
+	    (unsigned long long)ioc->reply_free_dma));
+	return 0;
+}
+
+/**
+ * _base_allocate_reply_post_free_array - Allocating DMA'able memory
+ *			for reply post free array.
+ * @ioc: Adapter object
+ * @reply_post_free_array_sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+
+static int
+_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
+	u32 reply_post_free_array_sz)
+{
+	ioc->reply_post_free_array_dma_pool =
+	    dma_pool_create("reply_post_free_array pool",
+	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+	if (!ioc->reply_post_free_array_dma_pool)
+		return -ENOMEM;
+	ioc->reply_post_free_array =
+	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
+	if (!ioc->reply_post_free_array)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
+	    reply_post_free_array_sz)) {
+		dinitprintk(ioc, pr_err(
+		    "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+		    ioc->reply_free,
+		    (unsigned long long) ioc->reply_free_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	return 0;
+}
+/**
  * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
  *                     for reply queues.
  * @ioc: per adapter object
@@ -5492,13 +5755,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	u16 chains_needed_per_io;
 	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
 	u32 retry_sz;
-	u32 rdpq_sz = 0;
+	u32 rdpq_sz = 0, sense_sz = 0;
 	u16 max_request_credit, nvme_blocks_needed;
 	unsigned short sg_tablesize;
 	u16 sge_size;
-	int i, j;
-	int ret = 0;
-	struct chain_tracker *ct;
+	int i;
+	int ret = 0, rc = 0;
 
 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
@@ -5802,6 +6064,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	 * be required for NVMe PRP's, only each set of NVMe blocks will be
 	 * contiguous, so a new set is allocated for each possible I/O.
 	 */
+
 	ioc->chains_per_prp_buffer = 0;
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
 		nvme_blocks_needed =
@@ -5816,190 +6079,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			goto out;
 		}
 		sz = nvme_blocks_needed * ioc->page_size;
-		ioc->pcie_sgl_dma_pool =
-			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
-		if (!ioc->pcie_sgl_dma_pool) {
-			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
-			goto out;
-		}
-
-		ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
-		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
-						ioc->chains_needed_per_io);
-
-		for (i = 0; i < ioc->scsiio_depth; i++) {
-			ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
-				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
-				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
-			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
-				goto out;
-			}
-			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
-				ct = &ioc->chain_lookup[i].chains_per_smid[j];
-				ct->chain_buffer =
-				    ioc->pcie_sg_lookup[i].pcie_sgl +
-				    (j * ioc->chain_segment_sz);
-				ct->chain_buffer_dma =
-				    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
-				    (j * ioc->chain_segment_sz);
-			}
-		}
-
-		dinitprintk(ioc,
-			    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
-				     ioc->scsiio_depth, sz,
-				     (sz * ioc->scsiio_depth) / 1024));
-		dinitprintk(ioc,
-			    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
-				     ioc->chains_per_prp_buffer));
+		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
+		if (rc == -ENOMEM)
+			return -ENOMEM;
+		else if (rc == -EAGAIN)
+			goto try_32bit_dma;
 		total_sz += sz * ioc->scsiio_depth;
 	}
 
-	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
-	    ioc->chain_segment_sz, 16, 0);
-	if (!ioc->chain_dma_pool) {
-		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
-		goto out;
-	}
-	for (i = 0; i < ioc->scsiio_depth; i++) {
-		for (j = ioc->chains_per_prp_buffer;
-				j < ioc->chains_needed_per_io; j++) {
-			ct = &ioc->chain_lookup[i].chains_per_smid[j];
-			ct->chain_buffer = dma_pool_alloc(
-					ioc->chain_dma_pool, GFP_KERNEL,
-					&ct->chain_buffer_dma);
-			if (!ct->chain_buffer) {
-				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-				goto out;
-			}
-		}
-		total_sz += ioc->chain_segment_sz;
-	}
-
+	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
+	if (rc == -ENOMEM)
+		return -ENOMEM;
+	else if (rc == -EAGAIN)
+		goto try_32bit_dma;
+	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
+		ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
 	dinitprintk(ioc,
-		    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-			     ioc->chain_depth, ioc->chain_segment_sz,
-			     (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
-
+	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+	    ioc->chain_depth, ioc->chain_segment_sz,
+	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
 	/* sense buffers, 4 byte align */
-	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
-	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-					      4, 0);
-	if (!ioc->sense_dma_pool) {
-		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
-		goto out;
-	}
-	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
-	    &ioc->sense_dma);
-	if (!ioc->sense) {
-		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
-		goto out;
-	}
-	/* sense buffer requires to be in same 4 gb region.
-	 * Below function will check the same.
-	 * In case of failure, new pci pool will be created with updated
-	 * alignment. Older allocation and pool will be destroyed.
-	 * Alignment will be used such a way that next allocation if
-	 * success, will always meet same 4gb region requirement.
-	 * Actual requirement is not alignment, but we need start and end of
-	 * DMA address must have same upper 32 bit address.
-	 */
-	if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
-		//Release Sense pool & Reallocate
-		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-		dma_pool_destroy(ioc->sense_dma_pool);
-		ioc->sense = NULL;
-
-		ioc->sense_dma_pool =
-			dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-						roundup_pow_of_two(sz), 0);
-		if (!ioc->sense_dma_pool) {
-			ioc_err(ioc, "sense pool: pci_pool_create failed\n");
-			goto out;
-		}
-		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
-				&ioc->sense_dma);
-		if (!ioc->sense) {
-			ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
-			goto out;
-		}
-	}
+	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
+	if (rc  == -ENOMEM)
+		return -ENOMEM;
+	else if (rc == -EAGAIN)
+		goto try_32bit_dma;
+	total_sz += sense_sz;
 	ioc_info(ioc,
 	    "sense pool(0x%p)- dma(0x%llx): depth(%d),"
 	    "element_size(%d), pool_size(%d kB)\n",
 	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
 	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
-
-	total_sz += sz;
-
 	/* reply pool, 4 byte align */
 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
-	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
-					      4, 0);
-	if (!ioc->reply_dma_pool) {
-		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
-		goto out;
-	}
-	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
-	    &ioc->reply_dma);
-	if (!ioc->reply) {
-		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
-		goto out;
-	}
-	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
-	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-	dinitprintk(ioc,
-		    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-			     ioc->reply, ioc->reply_free_queue_depth,
-			     ioc->reply_sz, sz / 1024));
-	dinitprintk(ioc,
-		    ioc_info(ioc, "reply_dma(0x%llx)\n",
-			     (unsigned long long)ioc->reply_dma));
+	rc = _base_allocate_reply_pool(ioc, sz);
+	if (rc == -ENOMEM)
+		return -ENOMEM;
+	else if (rc == -EAGAIN)
+		goto try_32bit_dma;
 	total_sz += sz;
 
 	/* reply free queue, 16 byte align */
 	sz = ioc->reply_free_queue_depth * 4;
-	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
-	    &ioc->pdev->dev, sz, 16, 0);
-	if (!ioc->reply_free_dma_pool) {
-		ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
-		goto out;
-	}
-	ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
-	    &ioc->reply_free_dma);
-	if (!ioc->reply_free) {
-		ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
-		goto out;
-	}
-	dinitprintk(ioc,
-		    ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-			     ioc->reply_free, ioc->reply_free_queue_depth,
-			     4, sz / 1024));
+	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
+	if (rc  == -ENOMEM)
+		return -ENOMEM;
+	else if (rc == -EAGAIN)
+		goto try_32bit_dma;
 	dinitprintk(ioc,
 		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
 			     (unsigned long long)ioc->reply_free_dma));
 	total_sz += sz;
-
 	if (ioc->rdpq_array_enable) {
 		reply_post_free_array_sz = ioc->reply_queue_count *
 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
-		ioc->reply_post_free_array_dma_pool =
-		    dma_pool_create("reply_post_free_array pool",
-		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
-		if (!ioc->reply_post_free_array_dma_pool) {
-			dinitprintk(ioc,
-				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
-			goto out;
-		}
-		ioc->reply_post_free_array =
-		    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
-		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
-		if (!ioc->reply_post_free_array) {
-			dinitprintk(ioc,
-				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
-			goto out;
-		}
+		rc = _base_allocate_reply_post_free_array(ioc,
+		    reply_post_free_array_sz);
+		if (rc == -ENOMEM)
+			return -ENOMEM;
+		else if (rc == -EAGAIN)
+			goto try_32bit_dma;
 	}
 	ioc->config_page_sz = 512;
 	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
@@ -6022,6 +6162,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		 ioc->shost->sg_tablesize);
 	return 0;
 
+try_32bit_dma:
+	_base_release_memory_pools(ioc);
+	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
+		/* Change dma coherent mask to 32 bit and reallocate */
+		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
+			    pci_name(ioc->pdev));
+			return -ENODEV;
+		}
+	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
+		return -ENOMEM;
+	goto retry_allocation;
+
  out:
 	return -ENOMEM;
 }
@@ -7252,6 +7405,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
 	ioc_info(ioc, "sending diag reset !!\n");
 
+	pci_cfg_access_lock(ioc->pdev);
+
 	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
 	count = 0;
@@ -7342,10 +7497,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 		goto out;
 	}
 
+	pci_cfg_access_unlock(ioc->pdev);
 	ioc_info(ioc, "diag reset: SUCCESS\n");
 	return 0;
 
  out:
+	pci_cfg_access_unlock(ioc->pdev);
 	ioc_err(ioc, "diag reset: FAILED\n");
 	return -EFAULT;
 }
@@ -7682,6 +7839,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 
 	ioc->rdpq_array_enable_assigned = 0;
 	ioc->use_32bit_dma = false;
+	ioc->dma_mask = 64;
 	if (ioc->is_aero_ioc)
 		ioc->base_readl = &_base_readl_aero;
 	else
@@ -7806,14 +7964,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		ioc->pend_os_device_add_sz++;
 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
 	    GFP_KERNEL);
-	if (!ioc->pend_os_device_add)
+	if (!ioc->pend_os_device_add) {
+		r = -ENOMEM;
 		goto out_free_resources;
+	}
 
 	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
 	ioc->device_remove_in_progress =
 		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
-	if (!ioc->device_remove_in_progress)
+	if (!ioc->device_remove_in_progress) {
+		r = -ENOMEM;
 		goto out_free_resources;
+	}
 
 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
