// SPDX-License-Identifier: GPL-2.0
/*
 * Author:
 *	Chuanhong Guo <gch981213@gmail.com>
 */
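
/*
 * SPI NAND manufacturer driver for the GigaDevice GD5F1GQ4xA, GD5F2GQ4xA
 * and GD5F4GQ4xA chips, plugged into the generic SPI NAND core.
 */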

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_GIGADEVICE			0xC8
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)
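
/*
 * Supported I/O variants, ordered from fastest to slowest; the SPI NAND
 * core picks the first one the SPI controller can handle. Page reads from
 * cache support quad/dual I/O and x4/x2/x1 output; program load supports
 * x4 and x1.
 */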
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
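
/*
 * OOB layout: the 64-byte spare area is split into four 16-byte sections.
 * The upper 8 bytes of each section are reported as ECC bytes; the lower
 * 8 bytes are free, except for byte 0 of section 0, which is reserved for
 * the bad block marker.
 */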
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 8;
	region->length = 8;

	return 0;
}

static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	if (section) {
		region->offset = 16 * section;
		region->length = 8;
	} else {
		/* section 0 has one byte reserved for bad block mark */
		region->offset = 1;
		region->length = 7;
	}

	return 0;
}
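
/*
 * Translate the chip-specific ECC status field (bits 4-5 of the status
 * register) into a bitflip count for the SPI NAND core: no bitflips,
 * 1-7 bitflips (the worst case, 7, is reported), exactly 8 bitflips, or
 * an uncorrectable error.
 */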
static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
				     u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/* 1-7 bits are flipped. return the maximum. */
		return 7;

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
	.ecc = gd5fxgq4xa_ooblayout_ecc,
	.free = gd5fxgq4xa_ooblayout_free,
};
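
/*
 * Supported chips. All three parts share a 2048-byte page with a 64-byte
 * spare area and 64 pages per block; they differ in block count (1024,
 * 2048 and 4096 for the 1, 2 and 4 Gbit parts) and in the device ID byte
 * matched against in the detect hook below.
 */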
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4xA", 0xF1,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F2GQ4xA", 0xF2,
		     NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F4GQ4xA", 0xF4,
		     NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
};
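
/*
 * Manufacturer detect hook: returns 1 when a supported GigaDevice chip is
 * identified, 0 when the ID does not belong to GigaDevice, or a negative
 * error code if matching/initialization fails.
 */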
static int gigadevice_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	/*
	 * GigaDevice NANDs need an address byte shifted in before the ID
	 * bytes are read out, so the first byte in raw_id is a dummy.
	 */
	if (id[1] != SPINAND_MFR_GIGADEVICE)
		return 0;

	ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
				     ARRAY_SIZE(gigadevice_spinand_table),
				     id[2]);
	if (ret)
		return ret;

	return 1;
}
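
/* Ops table and manufacturer descriptor hooked into the generic SPI NAND core. */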
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
	.detect = gigadevice_spinand_detect,
};

const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.ops = &gigadevice_spinand_manuf_ops,
};