// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_hw_utils.c: Definitions of helper functions used across
 * the hardware layer.
 */

#include "aq_hw_utils.h"

#include <linux/io-64-nonatomic-lo-hi.h>

#include "aq_hw.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_llh.h"

void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
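	/* A mask of all ones (msk ^ ~0 == 0) means the whole register is
	 * being written, so the value can be stored directly; any other
	 * mask takes the read-modify-write path below and only touches
	 * the selected field.
	 */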
	if (msk ^ ~0) {
		u32 reg_old, reg_new;

		reg_old = aq_hw_read_reg(aq_hw, addr);
		reg_new = (reg_old & (~msk)) | (val << shift);

		if (reg_old != reg_new)
			aq_hw_write_reg(aq_hw, addr, reg_new);
	} else {
		aq_hw_write_reg(aq_hw, addr, val);
	}
}

u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
{
	return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
}
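
/* Illustrative usage sketch (not part of the driver): a hypothetical
 * bitfield accessor pair built on the two helpers above.  The register
 * offset, mask and shift below are made-up values for demonstration;
 * real ones come from the hw_atl register definition headers.
 */
static void __maybe_unused example_mode_field_set(struct aq_hw_s *aq_hw,
						  u32 mode)
{
	/* Read-modify-write only bits [11:8], leaving the rest intact */
	aq_hw_write_reg_bit(aq_hw, 0x00005000U, 0x00000F00U, 8U, mode);
}

static u32 __maybe_unused example_mode_field_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, 0x00005000U, 0x00000F00U, 8U);
}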

u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
	u32 value = readl(hw->mmio + reg);

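	/* A read of all-ones can mean the PCIe device was surprise-removed.
	 * Confirm against the "alive check" register before latching the
	 * unplug error flag, so a register that legitimately reads as
	 * 0xffffffff is not mistaken for a dead device.
	 */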
	if (value == U32_MAX &&
	    readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
		aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);

	return value;
}

void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
{
	writel(value, hw->mmio + reg);
}

/* Most 64-bit registers are laid out as an LSW, MSW pair.
 * Counters are normally implemented by the HW as latched pairs:
 * reading the LSW first latches the MSW, so the pair stays consistent
 * even if the LSW overflows between the two reads.
 */
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
{
	u64 value = U64_MAX;

	if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
		value = readq(hw->mmio + reg);
	else
		value = lo_hi_readq(hw->mmio + reg);

	if (value == U64_MAX &&
	    readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
		aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);

	return value;
}
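
/* For reference, a minimal sketch (an assumption for illustration, not
 * driver code) of what the non-op64bit path above does via lo_hi_readq():
 * the low word is read first so the latched high word stays consistent
 * with it.
 */
static u64 __maybe_unused example_lo_hi_read(struct aq_hw_s *hw, u32 reg)
{
	u32 lo = readl(hw->mmio + reg);
	u32 hi = readl(hw->mmio + reg + 4);

	return ((u64)hi << 32) | lo;
}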

void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
{
	if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
		writeq(value, hw->mmio + reg);
	else
		lo_hi_writeq(value, hw->mmio + reg);
}

int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
{
	int err;
	u32 val;

	/* Invalidate Descriptor Cache to prevent writing to the cached
	 * descriptors and to the data pointer of those descriptors
	 */
	hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);

	err = aq_hw_err_from_flags(hw);
	if (err)
		goto err_exit;

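	/* Best-effort wait for the descriptor cache init toggle to complete;
	 * the poll result itself is not checked, so a timeout does not
	 * change the returned error code.
	 */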
	readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
				  hw, val, val == 1, 1000U, 10000U);

err_exit:
	return err;
}

int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
	int err = 0;

	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
		err = -EIO;
		goto err_exit;
	}

err_exit:
	return err;
}
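
/* Illustrative usage (hypothetical helper, not part of the driver):
 * the register accessors do not return errors directly; they latch error
 * flags on aq_hw_s, and callers convert them to an errno afterwards, as
 * the descriptor cache helper above does.
 */
static int __maybe_unused example_read_checked(struct aq_hw_s *hw, u32 reg,
					       u32 *val)
{
	*val = aq_hw_read_reg(hw, reg);

	/* -ENXIO if the device looks unplugged, -EIO on a latched HW error */
	return aq_hw_err_from_flags(hw);
}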

int aq_hw_num_tcs(struct aq_hw_s *hw)
{
	switch (hw->aq_nic_cfg->tc_mode) {
	case AQ_TC_MODE_8TCS:
		return 8;
	case AQ_TC_MODE_4TCS:
		return 4;
	default:
		break;
	}

	return 1;
}

int aq_hw_q_per_tc(struct aq_hw_s *hw)
{
	switch (hw->aq_nic_cfg->tc_mode) {
	case AQ_TC_MODE_8TCS:
		return 4;
	case AQ_TC_MODE_4TCS:
		return 8;
	default:
		return 4;
	}
}
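
/* Illustrative note (derived from the two helpers above, not driver code):
 * the 8-TC and 4-TC modes trade traffic classes for queues per class, so
 * the product is 32 queues in either mode.
 */
static int __maybe_unused example_total_queues(struct aq_hw_s *hw)
{
	return aq_hw_num_tcs(hw) * aq_hw_q_per_tc(hw);	/* 8 * 4 or 4 * 8 */
}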