path: root/arch/powerpc/platforms/powernv/pci-sriov.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/bitmap.h>
#include <linux/pci.h>

#include <asm/opal.h>

#include "pci.h"

/* for pci_dev_is_added() */
#include "../../../../drivers/pci/pci.h"


static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
	struct resource *res;
	int i;
	resource_size_t size, total_vf_bar_sz;
	struct pnv_iov_data *iov;
	int mul, total_vfs;

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov)
		goto truncate_iov;
	pdev->dev.archdata.iov_data = iov;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	mul = phb->ioda.total_pe_num;
	total_vf_bar_sz = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_m64_flags(res->flags)) {
			dev_warn(&pdev->dev, "Don't support SR-IOV with non-M64 VF BAR%d: %pR\n",
				 i, res);
			goto truncate_iov;
		}

		total_vf_bar_sz += pci_iov_resource_size(pdev,
				i + PCI_IOV_RESOURCES);

		/*
		 * If the total VF BAR size is bigger than a quarter of the
		 * M64 segment size, round the VF count up to a power of two
		 * and switch to Single PE mode instead.
		 *
		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflicts
		 * with other devices, the IOV BAR size is expanded to
		 * (total_pe * VF_BAR_size). If VF_BAR_size were half of the
		 * M64 segment size, the expanded size would equal half of the
		 * whole M64 space, which would exhaust the M64 space and
		 * limit system flexibility. Hence the design decision to set
		 * the boundary at a quarter of the M64 segment size.
		 */
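		/*
		 * Illustrative numbers (assumed, not taken from this code):
		 * a 64GB M64 window split across 256 PEs gives a 256MB
		 * segment size, so gate = 256MB >> 2 = 64MB; a device whose
		 * VF BARs total more than that is switched to Single PE mode.
		 */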
		if (total_vf_bar_sz > gate) {
			mul = roundup_pow_of_two(total_vfs);
			dev_info(&pdev->dev,
				"VF BAR total IOV size 0x%llx > 0x%llx, rounding up to %d VFs\n",
				total_vf_bar_sz, gate, mul);
			iov->m64_single_mode = true;
			break;
		}
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;

		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
		/*
		 * On PHB3, the minimum size alignment of M64 BAR in single
		 * mode is 32MB.
		 */
		if (iov->m64_single_mode && (size < SZ_32M))
			goto truncate_iov;
		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
		res->end = res->start + size * mul - 1;
		dev_dbg(&pdev->dev, "                       %pR\n", res);
		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)\n",
			 i, res, mul);
	}
	iov->vfs_expanded = mul;

	return;

truncate_iov:
	/* To save MMIO space, IOV BAR is truncated. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;
	}

	pdev->dev.archdata.iov_data = NULL;
	kfree(iov);
}

void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
{
	if (WARN_ON(pci_dev_is_added(pdev)))
		return;

	if (pdev->is_virtfn) {
		struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);

		/*
		 * VF PEs are single-device PEs so their pdev pointer needs to
		 * be set. The pdev doesn't exist when the PE is allocated (in
		 * pcibios_sriov_enable()) so we fix it up here.
		 */
		pe->pdev = pdev;
		WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
	} else if (pdev->is_physfn) {
		/*
		 * For PFs, adjust their allocated IOV resources to match what
		 * the PHB can support using its M64 BAR table.
		 */
		pnv_pci_ioda_fixup_iov_resources(pdev);
	}
}

resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pnv_iov_data *iov = pnv_iov_get(pdev);
	resource_size_t align;

	/*
	 * On the PowerNV platform an IOV BAR is mapped by an M64 BAR to
	 * enable SR-IOV, and from the hardware's perspective the range
	 * mapped by an M64 BAR must be size-aligned.
	 *
	 * When the IOV BAR is mapped by an M64 BAR in Single PE mode, that
	 * extra powernv-specific hardware restriction is gone. But if we
	 * just used the VF BAR size as the alignment, a PF BAR and a VF BAR
	 * could be allocated within one segment of M64 #15, creating a PE
	 * conflict between the PF and the VF. So the minimum alignment of
	 * an IOV BAR is m64_segsize.
	 *
	 * This function returns the total (expanded) IOV BAR size if the
	 * M64 BAR is in Shared PE mode. If the M64 BAR is in Single PE
	 * mode, it returns the VF BAR size, or the M64 segment size if the
	 * VF BAR size is smaller.
	 */
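	/*
	 * Worked example with assumed numbers: for a 2MB per-VF BAR with
	 * vfs_expanded = 256, Shared PE mode returns 256 * 2MB = 512MB;
	 * Single PE mode returns max(2MB, m64_segsize).
	 */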
	align = pci_iov_resource_size(pdev, resno);

	/*
	 * iov can be NULL if we have an SR-IOV device with an IOV BAR that
	 * can't be placed in the M64 space (i.e. the BAR is 32-bit or
	 * non-prefetchable). In that case we don't allow VFs to be enabled,
	 * so just return the default alignment.
	 */
	if (!iov)
		return align;
	if (!iov->vfs_expanded)
		return align;
	if (iov->m64_single_mode)
		return max(align, (resource_size_t)phb->ioda.m64_segsize);

	return iov->vfs_expanded * align;
}

static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_iov_data   *iov;
	struct pnv_phb        *phb;
	int                    i, j;
	int                    m64_bars;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);

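	/*
	 * In Single PE mode each VF's BAR slice was mapped with its own M64
	 * BAR, so there are num_vfs windows to tear down; in Shared PE mode
	 * a single M64 BAR covered the whole expanded IOV BAR.
	 */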
	if (iov->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (iov->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 0);
			clear_bit(iov->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			iov->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(iov->m64_map);
	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_iov_data   *iov;
	struct pnv_phb        *phb;
	unsigned int           win;
	struct resource       *res;
	int                    i, j;
	int64_t                rc;
	int                    total_vfs;
	resource_size_t        size, start;
	int                    pe_num;
	int                    m64_bars;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (iov->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	iov->m64_map = kmalloc_array(m64_bars,
				     sizeof(*iov->m64_map),
				     GFP_KERNEL);
	if (!iov->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			iov->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
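			/*
			 * Grab a free M64 BAR index from the PHB-wide
			 * allocation bitmap; test_and_set_bit() makes the
			 * claim atomic against concurrent allocators. In
			 * Single PE mode each M64 BAR will cover one VF-sized
			 * slice of the IOV BAR; in Shared PE mode a single
			 * M64 BAR covers the whole expanded IOV BAR and is
			 * segmented by PE number.
			 */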
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			iov->m64_map[j][i] = win;

			if (iov->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (iov->m64_single_mode) {
				pe_num = iov->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						iov->m64_map[j][i], 0);
				if (rc != OPAL_SUCCESS) {
					dev_err(&pdev->dev, "Failed to map PE#%d to M64 window #%d: %lld\n",
						pe_num, win, rc);
					goto m64_failed;
				}
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 iov->m64_map[j][i],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (iov->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
	struct pnv_phb        *phb;
	struct pnv_ioda_pe    *pe, *pe_n;

	phb = pci_bus_to_pnvhb(pdev->bus);

	if (!pdev->is_physfn)
		return;

	/* FIXME: Use pnv_ioda_release_pe()? */
	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_pe_dma(pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(pe);
	}
}

static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct resource *res, res2;
	struct pnv_iov_data *iov;
	resource_size_t size;
	u16 num_vfs;
	int i;

	if (!dev->is_physfn)
		return -EINVAL;
	iov = pnv_iov_get(dev);

	/*
	 * "offset" is in VFs.  The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number.  Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
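	/*
	 * Illustrative example (assumed numbers): if the per-VF BAR size
	 * equals a 256MB M64 segment and the first VF is assigned PE# 4,
	 * shifting by offset = 4 moves the BAR start up by 1GB, so VF0's
	 * BAR lands in PE 4's segment, VF1's in PE 5's, and so on.
	 */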
	num_vfs = iov->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR.  This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * Since the M64 BAR shares segments among all possible 256 PEs,
	 * we have to shift the start of the PF's IOV BAR so that it begins
	 * at the segment belonging to the PE number assigned to the first
	 * VF. This leaves a "hole" in /proc/iomem that could otherwise be
	 * used for allocating other resources, so we reserve that area below
	 * and release it when IOV is disabled.
	 */
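	/*
	 * The reservation below uses devm_request_resource(), so the
	 * reserved "hole" is also released automatically if the PF device
	 * itself goes away.
	 */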
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);

		if (offset < 0) {
			devm_release_resource(&dev->dev, &iov->holes[i]);
			memset(&iov->holes[i], 0, sizeof(iov->holes[i]));
		}

		pci_update_resource(dev, i + PCI_IOV_RESOURCES);

		if (offset > 0) {
			iov->holes[i].start = res2.start;
			iov->holes[i].end = res2.start + size * offset - 1;
			iov->holes[i].flags = IORESOURCE_BUS;
			iov->holes[i].name = "pnv_iov_reserved";
			devm_request_resource(&dev->dev, res->parent,
					&iov->holes[i]);
		}
	}
	return 0;
}

static void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pnv_phb        *phb;
	struct pnv_ioda_pe    *pe;
	struct pnv_iov_data   *iov;
	u16                    num_vfs, i;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);
	num_vfs = iov->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!iov->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*iov->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (iov->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (iov->pe_num_map[i] == IODA_INVALID_PE)
					continue;

				pe = &phb->ioda.pe_array[iov->pe_num_map[i]];
				pnv_ioda_free_pe(pe);
			}
		} else {
			bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
		}
		/* Releasing pe_num_map */
		kfree(iov->pe_num_map);
	}
}

static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_phb        *phb;
	struct pnv_ioda_pe    *pe;
	int                    pe_num;
	u16                    vf_index;
	struct pnv_iov_data   *iov;
	struct pci_dn         *pdn;

	if (!pdev->is_physfn)
		return;

	phb = pci_bus_to_pnvhb(pdev->bus);
	pdn = pci_get_pdn(pdev);
	iov = pnv_iov_get(pdev);

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
		int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
		struct pci_dn *vf_pdn;

		if (iov->m64_single_mode)
			pe_num = iov->pe_num_map[vf_index];
		else
			pe_num = *iov->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
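		/* The RID is the VF's PCI routing ID: (bus number << 8) | devfn */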
		pe->rid = (vf_bus << 8) | vf_devfn;

		pe_info(pe, "VF %04x:%02x:%02x.%d associated with PE#%x\n",
			pci_domain_nr(pdev->bus), vf_bus,
			PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		/* Associate this PE with its pdn */
		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
			if (vf_pdn->busno == vf_bus &&
			    vf_pdn->devfn == vf_devfn) {
				vf_pdn->pe_number = pe_num;
				break;
			}
		}

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}
}

static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_iov_data   *iov;
	struct pnv_phb        *phb;
	struct pnv_ioda_pe    *pe;
	int                    ret;
	u16                    i;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!iov->vfs_expanded) {
			dev_info(&pdev->dev,
				 "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n");
			return -ENOSPC;
		}

		/*
		 * When the M64 BARs function in Single PE mode, the number of
		 * VFs that can be enabled must not exceed the number of M64
		 * BARs.
		 */
		if (iov->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
			dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
			return -EBUSY;
		}

		/* Allocating pe_num_map */
		if (iov->m64_single_mode)
			iov->pe_num_map = kmalloc_array(num_vfs,
							sizeof(*iov->pe_num_map),
							GFP_KERNEL);
		else
			iov->pe_num_map = kmalloc(sizeof(*iov->pe_num_map), GFP_KERNEL);

		if (!iov->pe_num_map)
			return -ENOMEM;

		if (iov->m64_single_mode)
			for (i = 0; i < num_vfs; i++)
				iov->pe_num_map[i] = IODA_INVALID_PE;

		/* Calculate available PE for required VFs */
		if (iov->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				pe = pnv_ioda_alloc_pe(phb);
				if (!pe) {
					ret = -EBUSY;
					goto m64_failed;
				}

				iov->pe_num_map[i] = pe->pe_number;
			}
		} else {
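			/*
			 * Shared PE mode needs num_vfs contiguous PE numbers:
			 * the shared M64 window is segmented by PE number, so
			 * consecutive VF BAR segments must map onto
			 * consecutive PEs.
			 */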
			mutex_lock(&phb->ioda.pe_alloc_mutex);
			*iov->pe_num_map = bitmap_find_next_zero_area(
				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
				0, num_vfs, 0);
			if (*iov->pe_num_map >= phb->ioda.total_pe_num) {
				mutex_unlock(&phb->ioda.pe_alloc_mutex);
				dev_info(&pdev->dev, "Failed to enable %d VFs\n", num_vfs);
				kfree(iov->pe_num_map);
				return -EBUSY;
			}
			bitmap_set(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
		}
		iov->num_vfs = num_vfs;

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (!iov->m64_single_mode) {
			ret = pnv_pci_vf_resource_shift(pdev, *iov->pe_num_map);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	if (iov->m64_single_mode) {
		for (i = 0; i < num_vfs; i++) {
			if (iov->pe_num_map[i] == IODA_INVALID_PE)
				continue;

			pe = &phb->ioda.pe_array[iov->pe_num_map[i]];
			pnv_ioda_free_pe(pe);
		}
	} else {
		bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
	}

	/* Releasing pe_num_map */
	kfree(iov->pe_num_map);

	return ret;
}

int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_sriov_vf_pdns(pdev);
	return 0;
}

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_sriov_vf_pdns(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}
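
/*
 * Note: these two entry points are expected to be wired up as the powerpc
 * pcibios SR-IOV hooks (e.g. ppc_md.pcibios_sriov_enable/disable, set up
 * elsewhere in the powernv platform code) so that the generic PCI core's
 * SR-IOV enable/disable paths reach them.
 */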