Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/auxdisplay/Kconfig | 5
-rw-r--r--  drivers/auxdisplay/charlcd.c | 2
-rw-r--r--  drivers/auxdisplay/charlcd.h | 44
-rw-r--r--  drivers/auxdisplay/hd44780.c | 3
-rw-r--r--  drivers/auxdisplay/panel.c | 4
-rw-r--r--  drivers/base/regmap/Kconfig | 2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.h | 2
-rw-r--r--  drivers/dma/dw-edma/dw-edma-pcie.c | 18
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-core.c | 34
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 29
-rw-r--r--  drivers/dma/ste_dma40.c | 4
-rw-r--r--  drivers/dma/stm32-mdma.c | 2
-rw-r--r--  drivers/dma/tegra210-adma.c | 4
-rw-r--r--  drivers/dma/ti/omap-dma.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 11
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 22
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 4
-rw-r--r--  drivers/greybus/Kconfig | 32
-rw-r--r--  drivers/greybus/Makefile | 26
-rw-r--r--  drivers/greybus/arpc.h | 63
-rw-r--r--  drivers/greybus/bundle.c (renamed from drivers/staging/greybus/bundle.c) | 2
-rw-r--r--  drivers/greybus/connection.c (renamed from drivers/staging/greybus/connection.c) | 2
-rw-r--r--  drivers/greybus/control.c (renamed from drivers/staging/greybus/control.c) | 2
-rw-r--r--  drivers/greybus/core.c (renamed from drivers/staging/greybus/core.c) | 2
-rw-r--r--  drivers/greybus/debugfs.c (renamed from drivers/staging/greybus/debugfs.c) | 3
-rw-r--r--  drivers/greybus/es2.c (renamed from drivers/staging/greybus/es2.c) | 3
-rw-r--r--  drivers/greybus/greybus_trace.h (renamed from drivers/staging/greybus/greybus_trace.h) | 2
-rw-r--r--  drivers/greybus/hd.c (renamed from drivers/staging/greybus/hd.c) | 12
-rw-r--r--  drivers/greybus/interface.c (renamed from drivers/staging/greybus/interface.c) | 2
-rw-r--r--  drivers/greybus/manifest.c (renamed from drivers/staging/greybus/manifest.c) | 41
-rw-r--r--  drivers/greybus/module.c (renamed from drivers/staging/greybus/module.c) | 2
-rw-r--r--  drivers/greybus/operation.c (renamed from drivers/staging/greybus/operation.c) | 2
-rw-r--r--  drivers/greybus/svc.c (renamed from drivers/staging/greybus/svc.c) | 3
-rw-r--r--  drivers/greybus/svc_watchdog.c (renamed from drivers/staging/greybus/svc_watchdog.c) | 2
-rw-r--r--  drivers/hv/hv_trace.h | 2
-rw-r--r--  drivers/hwtracing/intel_th/msu.h | 2
-rw-r--r--  drivers/hwtracing/intel_th/pti.h | 2
-rw-r--r--  drivers/i2c/busses/i2c-emev2.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-stm32.h | 2
-rw-r--r--  drivers/iio/adc/max9611.c | 2
-rw-r--r--  drivers/iio/frequency/adf4371.c | 8
-rw-r--r--  drivers/infiniband/core/counters.c | 6
-rw-r--r--  drivers/infiniband/core/nldev.c | 8
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 24
-rw-r--r--  drivers/infiniband/sw/siw/Kconfig | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c | 4
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c | 14
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 16
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 4
-rw-r--r--  drivers/iommu/dma-iommu.c | 25
-rw-r--r--  drivers/iommu/intel-iommu-debugfs.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 11
-rw-r--r--  drivers/media/platform/omap/omap_vout_vrfb.c | 3
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/habanalabs/device.c | 5
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 72
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 2
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 9
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 14
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_packets.h | 13
-rw-r--r--  drivers/misc/habanalabs/irq.c | 27
-rw-r--r--  drivers/misc/habanalabs/memory.c | 2
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 5
-rw-r--r--  drivers/nvme/host/core.c | 15
-rw-r--r--  drivers/nvme/host/multipath.c | 76
-rw-r--r--  drivers/nvme/host/nvme.h | 21
-rw-r--r--  drivers/nvme/host/pci.c | 16
-rw-r--r--  drivers/nvme/host/rdma.c | 16
-rw-r--r--  drivers/nvme/target/configfs.c | 1
-rw-r--r--  drivers/nvme/target/core.c | 15
-rw-r--r--  drivers/nvme/target/loop.c | 8
-rw-r--r--  drivers/nvme/target/nvmet.h | 3
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/of/resolver.c | 12
-rw-r--r--  drivers/pci/pcie/aspm.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 23
-rw-r--r--  drivers/soundwire/Kconfig | 7
-rw-r--r--  drivers/soundwire/Makefile | 2
-rw-r--r--  drivers/soundwire/cadence_master.c | 8
-rw-r--r--  drivers/staging/Kconfig | 4
-rw-r--r--  drivers/staging/Makefile | 2
-rw-r--r--  drivers/staging/android/TODO | 2
-rw-r--r--  drivers/staging/comedi/drivers/dt3000.c | 8
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 2
-rw-r--r--  drivers/staging/erofs/Documentation/filesystems/erofs.txt | 223
-rw-r--r--  drivers/staging/erofs/Kconfig | 98
-rw-r--r--  drivers/staging/erofs/Makefile | 13
-rw-r--r--  drivers/staging/erofs/TODO | 46
-rw-r--r--  drivers/staging/erofs/compress.h | 62
-rw-r--r--  drivers/staging/erofs/data.c | 425
-rw-r--r--  drivers/staging/erofs/decompressor.c | 360
-rw-r--r--  drivers/staging/erofs/dir.c | 148
-rw-r--r--  drivers/staging/erofs/erofs_fs.h | 317
-rw-r--r--  drivers/staging/erofs/include/trace/events/erofs.h | 256
-rw-r--r--  drivers/staging/erofs/inode.c | 334
-rw-r--r--  drivers/staging/erofs/internal.h | 554
-rw-r--r--  drivers/staging/erofs/namei.c | 253
-rw-r--r--  drivers/staging/erofs/super.c | 666
-rw-r--r--  drivers/staging/erofs/tagptr.h | 110
-rw-r--r--  drivers/staging/erofs/utils.c | 335
-rw-r--r--  drivers/staging/erofs/xattr.c | 705
-rw-r--r--  drivers/staging/erofs/xattr.h | 94
-rw-r--r--  drivers/staging/erofs/zdata.c | 1405
-rw-r--r--  drivers/staging/erofs/zdata.h | 195
-rw-r--r--  drivers/staging/erofs/zmap.c | 463
-rw-r--r--  drivers/staging/erofs/zpvec.h | 159
-rw-r--r--  drivers/staging/exfat/Kconfig | 49
-rw-r--r--  drivers/staging/exfat/Makefile | 10
-rw-r--r--  drivers/staging/exfat/TODO | 12
-rw-r--r--  drivers/staging/exfat/exfat.h | 971
-rw-r--r--  drivers/staging/exfat/exfat_blkdev.c | 136
-rw-r--r--  drivers/staging/exfat/exfat_cache.c | 724
-rw-r--r--  drivers/staging/exfat/exfat_core.c | 3703
-rw-r--r--  drivers/staging/exfat/exfat_nls.c | 404
-rw-r--r--  drivers/staging/exfat/exfat_super.c | 4146
-rw-r--r--  drivers/staging/exfat/exfat_upcase.c | 740
-rw-r--r--  drivers/staging/gasket/gasket_ioctl.c | 12
-rw-r--r--  drivers/staging/greybus/Documentation/firmware/authenticate.c | 46
-rw-r--r--  drivers/staging/greybus/Documentation/firmware/firmware.c | 46
-rw-r--r--  drivers/staging/greybus/Kconfig | 27
-rw-r--r--  drivers/staging/greybus/Makefile | 22
-rw-r--r--  drivers/staging/greybus/arche-platform.c | 2
-rw-r--r--  drivers/staging/greybus/arpc.h | 109
-rw-r--r--  drivers/staging/greybus/audio_apbridgea.c | 3
-rw-r--r--  drivers/staging/greybus/audio_apbridgea.h | 26
-rw-r--r--  drivers/staging/greybus/audio_codec.h | 4
-rw-r--r--  drivers/staging/greybus/audio_gb.c | 4
-rw-r--r--  drivers/staging/greybus/authentication.c | 3
-rw-r--r--  drivers/staging/greybus/bootrom.c | 2
-rw-r--r--  drivers/staging/greybus/bundle.h | 89
-rw-r--r--  drivers/staging/greybus/camera.c | 2
-rw-r--r--  drivers/staging/greybus/connection.h | 128
-rw-r--r--  drivers/staging/greybus/control.h | 57
-rw-r--r--  drivers/staging/greybus/firmware.h | 4
-rw-r--r--  drivers/staging/greybus/fw-core.c | 2
-rw-r--r--  drivers/staging/greybus/fw-download.c | 2
-rw-r--r--  drivers/staging/greybus/fw-management.c | 2
-rw-r--r--  drivers/staging/greybus/gb-camera.h | 2
-rw-r--r--  drivers/staging/greybus/gbphy.c | 2
-rw-r--r--  drivers/staging/greybus/gbphy.h | 2
-rw-r--r--  drivers/staging/greybus/gpio.c | 2
-rw-r--r--  drivers/staging/greybus/greybus.h | 152
-rw-r--r--  drivers/staging/greybus/greybus_authentication.h | 48
-rw-r--r--  drivers/staging/greybus/greybus_firmware.h | 48
-rw-r--r--  drivers/staging/greybus/greybus_id.h | 27
-rw-r--r--  drivers/staging/greybus/greybus_manifest.h | 178
-rw-r--r--  drivers/staging/greybus/greybus_protocols.h | 2222
-rw-r--r--  drivers/staging/greybus/hd.h | 82
-rw-r--r--  drivers/staging/greybus/hid.c | 3
-rw-r--r--  drivers/staging/greybus/i2c.c | 2
-rw-r--r--  drivers/staging/greybus/interface.h | 82
-rw-r--r--  drivers/staging/greybus/light.c | 16
-rw-r--r--  drivers/staging/greybus/log.c | 9
-rw-r--r--  drivers/staging/greybus/loopback.c | 9
-rw-r--r--  drivers/staging/greybus/manifest.h | 15
-rw-r--r--  drivers/staging/greybus/module.h | 33
-rw-r--r--  drivers/staging/greybus/operation.h | 224
-rw-r--r--  drivers/staging/greybus/power_supply.c | 3
-rw-r--r--  drivers/staging/greybus/pwm.c | 2
-rw-r--r--  drivers/staging/greybus/raw.c | 3
-rw-r--r--  drivers/staging/greybus/sdio.c | 2
-rw-r--r--  drivers/staging/greybus/spi.c | 2
-rw-r--r--  drivers/staging/greybus/spilib.c | 2
-rw-r--r--  drivers/staging/greybus/spilib.h | 2
-rw-r--r--  drivers/staging/greybus/svc.h | 101
-rw-r--r--  drivers/staging/greybus/tools/loopback_test.c | 2
-rw-r--r--  drivers/staging/greybus/uart.c | 2
-rw-r--r--  drivers/staging/greybus/usb.c | 2
-rw-r--r--  drivers/staging/greybus/vibrator.c | 3
-rw-r--r--  drivers/staging/kpc2000/kpc2000/cell_probe.c | 18
-rw-r--r--  drivers/staging/kpc2000/kpc2000/core.c | 18
-rw-r--r--  drivers/staging/kpc2000/kpc2000_i2c.c | 4
-rw-r--r--  drivers/staging/kpc2000/kpc_dma/fileops.c | 8
-rw-r--r--  drivers/staging/most/core.c | 4
-rw-r--r--  drivers/staging/most/sound/sound.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_recv.c | 6
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c | 3
-rw-r--r--  drivers/staging/rtl8192e/Kconfig | 1
-rw-r--r--  drivers/staging/rtl8192e/rtl8192e/rtl_dm.c | 12
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_ccmp.c | 206
-rw-r--r--  drivers/staging/rtl8192u/Kconfig | 2
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/dot11d.c | 10
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211.h | 40
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c | 2
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c | 204
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 22
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c | 4
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 230
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 1
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c | 14
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c | 142
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c | 43
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c | 12
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h | 17
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c | 4
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c | 18
-rw-r--r--  drivers/staging/rtl8192u/r8180_93cx6.c | 11
-rw-r--r--  drivers/staging/rtl8192u/r8190_rtl8256.c | 38
-rw-r--r--  drivers/staging/rtl8192u/r8192U_core.c | 101
-rw-r--r--  drivers/staging/rtl8192u/r819xU_firmware.c | 2
-rw-r--r--  drivers/staging/rtl8192u/r819xU_phy.c | 59
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.h | 7
-rw-r--r--  drivers/staging/rtl8712/rtl871x_rf.h | 3
-rw-r--r--  drivers/staging/rtl8712/wifi.h | 8
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_ap.c | 1
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_pwrctrl.c | 4
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_wlan_util.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/hal/hal_btcoex.c | 4
-rw-r--r--  drivers/staging/rtl8723bs/include/drv_types.h | 6
-rw-r--r--  drivers/staging/rtl8723bs/include/osdep_service.h | 10
-rw-r--r--  drivers/staging/rtl8723bs/include/osdep_service_linux.h | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_mlme.h | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_recv.h | 16
-rw-r--r--  drivers/staging/rtl8723bs/include/sta_info.h | 2
-rw-r--r--  drivers/staging/rtl8723bs/include/wifi.h | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/wlan_bssdef.h | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 5
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/wifi_regd.c | 5
-rw-r--r--  drivers/staging/rts5208/ms.c | 2
-rw-r--r--  drivers/staging/rts5208/rtsx_transport.c | 12
-rw-r--r--  drivers/staging/rts5208/sd.c | 28
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 19
-rw-r--r--  drivers/usb/class/cdc-acm.c | 12
-rw-r--r--  drivers/usb/core/buffer.c | 10
-rw-r--r--  drivers/usb/core/file.c | 10
-rw-r--r--  drivers/usb/core/hcd.c | 4
-rw-r--r--  drivers/usb/core/message.c | 4
-rw-r--r--  drivers/usb/dwc2/hcd.c | 2
-rw-r--r--  drivers/usb/gadget/composite.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 28
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 5
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 4
-rw-r--r--  drivers/usb/serial/option.c | 10
249 files changed, 12296 insertions, 12298 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 61cf4ea2c229..7dce76ae7369 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -146,6 +146,8 @@ source "drivers/hv/Kconfig"
source "drivers/xen/Kconfig"
+source "drivers/greybus/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 6d37564e783c..73df8e5a2fce 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -148,6 +148,7 @@ obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
+obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..68489d1f00bb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
choice
prompt "Backlight initial state"
default CHARLCD_BL_FLASH
+ ---help---
+ Select the initial backlight state on boot or module load.
+
+ Previously, there was no option for this: the backlight flashed
+ briefly on init. Now you can also turn it off/on.
config CHARLCD_BL_OFF
bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
#include <generated/utsrelease.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define LCD_MINOR 156
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
new file mode 100644
index 000000000000..00911ad0f3de
--- /dev/null
+++ b/drivers/auxdisplay/charlcd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
+struct charlcd {
+ const struct charlcd_ops *ops;
+ const unsigned char *char_conv; /* Optional */
+
+ int ifwidth; /* 4-bit or 8-bit (default) */
+ int height;
+ int width;
+ int bwidth; /* Default set by charlcd_alloc() */
+ int hwidth; /* Default set by charlcd_alloc() */
+
+ void *drvdata; /* Set by charlcd_alloc() */
+};
+
+struct charlcd_ops {
+ /* Required */
+ void (*write_cmd)(struct charlcd *lcd, int cmd);
+ void (*write_data)(struct charlcd *lcd, int data);
+
+ /* Optional */
+ void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
+ void (*clear_fast)(struct charlcd *lcd);
+ void (*backlight)(struct charlcd *lcd, int on);
+};
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
+
+int charlcd_register(struct charlcd *lcd);
+int charlcd_unregister(struct charlcd *lcd);
+
+void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
#include <linux/property.h>
#include <linux/slab.h>
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
enum hd44780_pin {
/* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define KEYPAD_MINOR 185
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
return;
err_lcd_unreg:
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
- depends on SOUNDWIRE_BUS
+ depends on SOUNDWIRE
config REGMAP_SCCB
tristate
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3ac6a5d18071..b90dbcd99c03 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
}
+ err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
- return err;
+ goto fail;
}
return 0;
@@ -1007,8 +1008,7 @@ fail:
}
kfree(req);
}
- return -ENOMEM;
-
+ return err;
}
static int connect_ring(struct backend_info *be)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c28ebf2810f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
}
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
- if (ret)
+ if (ret < 0)
break;
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
struct dw_edma_region {
phys_addr_t paddr;
- dma_addr_t vaddr;
+ void __iomem *vaddr;
size_t sz;
};
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
- dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+ dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
- dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+ dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
- &dw->rg_region.vaddr, &dw->rg_region.paddr);
+ dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- &dw->ll_region.vaddr, &dw->ll_region.paddr);
+ dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- &dw->dt_region.vaddr, &dw->dt_region.paddr);
+ dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+ return dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_v0_lli *lli;
- struct dw_edma_v0_llp *llp;
+ struct dw_edma_v0_lli __iomem *lli;
+ struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
- u64 sar, dar, addr;
int j;
- lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+ lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
- sar = cpu_to_le64(child->sar);
- SET_LL(&lli[i].sar_low, lower_32_bits(sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+ SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+ SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
/* DAR - low, high */
- dar = cpu_to_le64(child->dar);
- SET_LL(&lli[i].dar_low, lower_32_bits(dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+ SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+ SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
i++;
}
- llp = (struct dw_edma_v0_llp *)&lli[i];
+ llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
- addr = cpu_to_le64(chunk->ll_region.paddr);
- SET_LL(&llp->llp_low, lower_32_bits(addr));
- SET_LL(&llp->llp_high, upper_32_bits(addr));
+ SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
- u64 llp;
dw_edma_v0_core_write_chunk(chunk);
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
- llp = cpu_to_le64(chunk->ll_region.paddr);
- SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
- SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+ SET_CH(dw, chan->dir, chan->id, llp_low,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH(dw, chan->dir, chan->id, llp_high,
+ upper_32_bits(chunk->ll_region.paddr));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
- ((dma_addr_t *)&regs->name)
+ ((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
@@ -40,36 +40,37 @@
static struct dentry *base_dir;
static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
static struct {
- void *start;
- void *end;
+ void __iomem *start;
+ void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
- char name[24];
+ const char *name;
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
+ void __iomem *reg = (void __force __iomem *)data;
if (dw->mode == EDMA_MODE_LEGACY &&
- data >= (void *)&regs->type.legacy.ch) {
- void *ptr = (void *)&regs->type.legacy.ch;
+ reg >= (void __iomem *)&regs->type.legacy.ch) {
+ void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= data && data < lim[0][ch].end) {
- ptr += (data - lim[0][ch].start);
+ if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+ ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= data && data < lim[1][ch].end) {
- ptr += (data - lim[1][ch].start);
+ if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+ ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
@@ -86,7 +87,7 @@ legacy_sel_wr:
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
- *val = readl(data);
+ *val = readl(reg);
}
return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
{
int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw)
return;
- regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+ regs = dw->rg_region.vaddr;
if (!regs)
return;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..1311de74bfdd 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
goto exit;
}
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..b33cf6e8ab8e 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
return chan;
}
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
return 0;
}
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..ba27802efcd0 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (src_icg) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
d->ei = 1;
- d->fi = src_icg;
+ d->fi = src_icg + 1;
} else if (xt->src_inc) {
d->ccr |= CCR_SRC_AMODE_POSTINC;
d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (dst_icg) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
sg->ei = 1;
- sg->fi = dst_icg;
+ sg->fi = dst_icg + 1;
} else if (xt->dst_inc) {
d->ccr |= CCR_DST_AMODE_POSTINC;
sg->fi = 0;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1db780c0f07b..3caae7f2cf56 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -927,17 +927,33 @@ fail:
return status;
}
+#define GET_EFI_CONFIG_TABLE(bits) \
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
+ efi_guid_t guid) \
+{ \
+ efi_system_table_##bits##_t *sys_table; \
+ efi_config_table_##bits##_t *tables; \
+ int i; \
+ \
+ sys_table = (typeof(sys_table))_sys_table; \
+ tables = (typeof(tables))(unsigned long)sys_table->tables; \
+ \
+ for (i = 0; i < sys_table->nr_tables; i++) { \
+ if (efi_guidcmp(tables[i].guid, guid) != 0) \
+ continue; \
+ \
+ return (void *)(unsigned long)tables[i].table; \
+ } \
+ \
+ return NULL; \
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
{
- efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
- int i;
-
- for (i = 0; i < sys_table->nr_tables; i++) {
- if (efi_guidcmp(tables[i].guid, guid) != 0)
- continue;
-
- return (void *)tables[i].table;
- }
-
- return NULL;
+ if (efi_is_64bit())
+ return get_efi_config_table64(sys_table, guid);
+ else
+ return get_efi_config_table32(sys_table, guid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a51178..04b8ac4432c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- WREG32(mmSQ_CMD, value);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fa20201eef3a..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
*/
#include <linux/slab.h>
+#include <linux/mm.h>
#include "dm_services.h"
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
struct dc_state *dc_create_state(struct dc *dc)
{
- struct dc_state *context = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
if (!context)
return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
- struct dc_state *new_ctx = kmemdup(src_ctx,
- sizeof(struct dc_state), GFP_KERNEL);
+ struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
if (!new_ctx)
return NULL;
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
- kfree(context);
+ kvfree(context);
}
void dc_release_state(struct dc_state *context)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
/* Enable extended register access */
- ast_enable_mmio(dev);
ast_open_key(ast);
+ ast_enable_mmio(dev);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
ast_open_key(ast);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9f3fd7d96a69..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.indirect_ctx.guest_gma,
workload->wa_ctx.indirect_ctx.size)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
workload->wa_ctx.indirect_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.per_ctx.guest_gma,
CACHELINE_BYTES)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
workload->wa_ctx.per_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 126703816794..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
- /* When restoring duplicated states, we need to make sure that the
- * bw remains the same and avoid recalculating it, as the connector's
- * bpc may have changed after the state was duplicated
- */
- if (!state->duplicated)
- asyh->dp.pbn =
- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
- connector->display_info.bpc * 3);
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ /*
+ * When restoring duplicated states, we need to make sure that
+ * the bw remains the same and avoid recalculating it, as the
+ * connector's bpc may have changed after the state was
+ * duplicated
+ */
+ if (!state->duplicated) {
+ const int bpp = connector->display_info.bpc * 3;
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+ }
- if (crtc_state->mode_changed) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
- spsc_queue_peek(&entity->job_queue) == NULL)
+ spsc_queue_count(&entity->job_queue) == 0)
return true;
return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
- if (spsc_queue_peek(&entity->job_queue)) {
+ if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
/* Park the kernel for a moment to make sure it isn't processing
* our entity.
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
new file mode 100644
index 000000000000..b84fcaf8b105
--- /dev/null
+++ b/drivers/greybus/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig GREYBUS
+ tristate "Greybus support"
+ depends on SYSFS
+ ---help---
+ This option enables the Greybus driver core. Greybus is a
+ hardware protocol that was designed to provide Unipro with a
+ sane application layer. It was originally designed for the
+ ARA project, a modular phone system, but has shown up in other
+ phones, and can be tunneled over other buses in order to
+ control hardware devices.
+
+ Say Y here to enable support for these types of drivers.
+
+ To compile this code as a module, choose M here: the module
+ will be called greybus.ko
+
+if GREYBUS
+
+config GREYBUS_ES2
+ tristate "Greybus ES3 USB host controller"
+ depends on USB
+ ---help---
+ Select this option if you have a Toshiba ES3 USB device that
+ acts as a Greybus "host controller". This device is a bridge
+ from a USB device to a Unipro network.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-es2.ko
+
+endif # GREYBUS
+
diff --git a/drivers/greybus/Makefile b/drivers/greybus/Makefile
new file mode 100644
index 000000000000..9bccdd229aa2
--- /dev/null
+++ b/drivers/greybus/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+# Greybus core
+greybus-y := core.o \
+ debugfs.o \
+ hd.o \
+ manifest.o \
+ module.o \
+ interface.o \
+ bundle.o \
+ connection.o \
+ control.o \
+ svc.o \
+ svc_watchdog.o \
+ operation.o
+
+obj-$(CONFIG_GREYBUS) += greybus.o
+
+# needed for trace events
+ccflags-y += -I$(src)
+
+# Greybus Host controller drivers
+gb-es2-y := es2.o
+
+obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
+
+
diff --git a/drivers/greybus/arpc.h b/drivers/greybus/arpc.h
new file mode 100644
index 000000000000..c8b83c5cfa79
--- /dev/null
+++ b/drivers/greybus/arpc.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __ARPC_H
+#define __ARPC_H
+
+/* APBridgeA RPC (ARPC) */
+
+enum arpc_result {
+ ARPC_SUCCESS = 0x00,
+ ARPC_NO_MEMORY = 0x01,
+ ARPC_INVALID = 0x02,
+ ARPC_TIMEOUT = 0x03,
+ ARPC_UNKNOWN_ERROR = 0xff,
+};
+
+struct arpc_request_message {
+ __le16 id; /* RPC unique id */
+ __le16 size; /* Size in bytes of header + payload */
+ __u8 type; /* RPC type */
+ __u8 data[0]; /* ARPC data */
+} __packed;
+
+struct arpc_response_message {
+ __le16 id; /* RPC unique id */
+ __u8 result; /* Result of RPC */
+} __packed;
+
+/* ARPC requests */
+#define ARPC_TYPE_CPORT_CONNECTED 0x01
+#define ARPC_TYPE_CPORT_QUIESCE 0x02
+#define ARPC_TYPE_CPORT_CLEAR 0x03
+#define ARPC_TYPE_CPORT_FLUSH 0x04
+#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
+
+struct arpc_cport_connected_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_quiesce_req {
+ __le16 cport_id;
+ __le16 peer_space;
+ __le16 timeout;
+} __packed;
+
+struct arpc_cport_clear_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_flush_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_shutdown_req {
+ __le16 cport_id;
+ __le16 timeout;
+ __u8 phase;
+} __packed;
+
+#endif /* __ARPC_H */
diff --git a/drivers/staging/greybus/bundle.c b/drivers/greybus/bundle.c
index 3f702db9e098..84660729538b 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/greybus/bundle.c
@@ -6,7 +6,7 @@
* Copyright 2014-2015 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
static ssize_t bundle_class_show(struct device *dev,
diff --git a/drivers/staging/greybus/connection.c b/drivers/greybus/connection.c
index eda964208cce..fc8f57f97ce6 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -7,8 +7,8 @@
*/
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
diff --git a/drivers/staging/greybus/control.c b/drivers/greybus/control.c
index a9e8b6036cac..359a25841973 100644
--- a/drivers/staging/greybus/control.c
+++ b/drivers/greybus/control.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include "greybus.h"
+#include <linux/greybus.h>
/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR 0
diff --git a/drivers/staging/greybus/core.c b/drivers/greybus/core.c
index d6b0d49130c0..e546c6431877 100644
--- a/drivers/staging/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -9,7 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define CREATE_TRACE_POINTS
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
#define GB_BUNDLE_AUTOSUSPEND_MS 3000
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/greybus/debugfs.c
index 56e20c30feb5..e102d7badb9d 100644
--- a/drivers/staging/greybus/debugfs.c
+++ b/drivers/greybus/debugfs.c
@@ -7,8 +7,7 @@
*/
#include <linux/debugfs.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
static struct dentry *gb_debug_root;
diff --git a/drivers/staging/greybus/es2.c b/drivers/greybus/es2.c
index be6af18cec31..366716f11b1a 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -11,12 +11,11 @@
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
+#include <linux/greybus.h>
#include <asm/unaligned.h>
#include "arpc.h"
-#include "greybus.h"
#include "greybus_trace.h"
-#include "connection.h"
/* Default timeout for USB vendor requests. */
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h
index 7b5e2c6b1f6b..1bc9f1275c65 100644
--- a/drivers/staging/greybus/greybus_trace.h
+++ b/drivers/greybus/greybus_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus driver and device API
*
diff --git a/drivers/staging/greybus/hd.c b/drivers/greybus/hd.c
index 969f86697673..72b21bf2d7d3 100644
--- a/drivers/staging/greybus/hd.c
+++ b/drivers/greybus/hd.c
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
@@ -31,7 +31,7 @@ int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
EXPORT_SYMBOL_GPL(gb_hd_output);
static ssize_t bus_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct gb_host_device *hd = to_gb_host_device(dev);
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
- unsigned long flags)
+ unsigned long flags)
{
struct ida *id_map = &hd->cport_id_map;
int ida_start, ida_end;
@@ -122,9 +122,9 @@ struct device_type greybus_hd_type = {
};
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
- struct device *parent,
- size_t buffer_size_max,
- size_t num_cports)
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports)
{
struct gb_host_device *hd;
int ret;
diff --git a/drivers/staging/greybus/interface.c b/drivers/greybus/interface.c
index d7b5b89a2f40..67dbe6fda9a1 100644
--- a/drivers/staging/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -7,8 +7,8 @@
*/
#include <linux/delay.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
diff --git a/drivers/staging/greybus/manifest.c b/drivers/greybus/manifest.c
index 08db49264f2b..dd7040697bde 100644
--- a/drivers/staging/greybus/manifest.c
+++ b/drivers/greybus/manifest.c
@@ -6,7 +6,7 @@
* Copyright 2014-2015 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
static const char *get_descriptor_type_string(u8 type)
{
@@ -104,15 +104,15 @@ static int identify_descriptor(struct gb_interface *intf,
size_t expected_size;
if (size < sizeof(*desc_header)) {
- dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
- size, sizeof(*desc_header));
+ dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
+ sizeof(*desc_header));
return -EINVAL; /* Must at least have header */
}
desc_size = le16_to_cpu(desc_header->size);
if (desc_size > size) {
dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
- desc_size, size);
+ desc_size, size);
return -EINVAL;
}
@@ -139,22 +139,22 @@ static int identify_descriptor(struct gb_interface *intf,
case GREYBUS_TYPE_INVALID:
default:
dev_err(&intf->dev, "invalid descriptor type (%u)\n",
- desc_header->type);
+ desc_header->type);
return -EINVAL;
}
if (desc_size < expected_size) {
dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
- get_descriptor_type_string(desc_header->type),
- desc_size, expected_size);
+ get_descriptor_type_string(desc_header->type),
+ desc_size, expected_size);
return -EINVAL;
}
/* Descriptor bigger than what we expect */
if (desc_size > expected_size) {
dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
- get_descriptor_type_string(desc_header->type),
- expected_size, desc_size);
+ get_descriptor_type_string(desc_header->type),
+ expected_size, desc_size);
}
descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
@@ -208,7 +208,7 @@ static char *gb_string_get(struct gb_interface *intf, u8 string_id)
/* Allocate an extra byte so we can guarantee it's NUL-terminated */
string = kmemdup(&desc_string->string, desc_string->length + 1,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!string)
return ERR_PTR(-ENOMEM);
string[desc_string->length] = '\0';
@@ -264,8 +264,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
desc_cport = tmp->data;
if (cport_id == le16_to_cpu(desc_cport->id)) {
dev_err(&bundle->dev,
- "duplicate CPort %u found\n",
- cport_id);
+ "duplicate CPort %u found\n", cport_id);
goto exit;
}
}
@@ -277,7 +276,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
return 0;
bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!bundle->cport_desc)
goto exit;
@@ -287,7 +286,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
list_for_each_entry_safe(desc, next, &list, links) {
desc_cport = desc->data;
memcpy(&bundle->cport_desc[i++], desc_cport,
- sizeof(*desc_cport));
+ sizeof(*desc_cport));
/* Release the cport descriptor */
release_manifest_descriptor(desc);
@@ -333,9 +332,9 @@ static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
/* Ignore any legacy control bundles */
if (bundle_id == GB_CONTROL_BUNDLE_ID) {
dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
- __func__);
+ __func__);
release_cport_descriptors(&intf->manifest_descs,
- bundle_id);
+ bundle_id);
continue;
}
@@ -468,7 +467,7 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
/* we have to have at _least_ the manifest header */
if (size < sizeof(*header)) {
dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
- size, sizeof(*header));
+ size, sizeof(*header));
return false;
}
@@ -478,15 +477,15 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
manifest_size = le16_to_cpu(header->size);
if (manifest_size != size) {
dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
- size, manifest_size);
+ size, manifest_size);
return false;
}
/* Validate major/minor number */
if (header->version_major > GREYBUS_VERSION_MAJOR) {
dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
- header->version_major, header->version_minor,
- GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+ header->version_major, header->version_minor,
+ GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
return false;
}
@@ -513,7 +512,7 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
}
if (found != 1) {
dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
- found);
+ found);
result = false;
goto out;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/greybus/module.c
index b251a53d0e8e..36f77f9e1d74 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/greybus/module.c
@@ -6,7 +6,7 @@
* Copyright 2016 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
static ssize_t eject_store(struct device *dev,
diff --git a/drivers/staging/greybus/operation.c b/drivers/greybus/operation.c
index fe268f7b63ed..8459e9bc0749 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/greybus/operation.c
@@ -12,8 +12,8 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
static struct kmem_cache *gb_operation_cache;
diff --git a/drivers/staging/greybus/svc.c b/drivers/greybus/svc.c
index 05bc45287b87..ce7740ef449b 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -8,8 +8,7 @@
#include <linux/debugfs.h>
#include <linux/workqueue.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
#define SVC_INTF_EJECT_TIMEOUT 9000
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/greybus/svc_watchdog.c
index 7868ad8211c5..b6b1682c19c4 100644
--- a/drivers/staging/greybus/svc_watchdog.c
+++ b/drivers/greybus/svc_watchdog.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
-#include "greybus.h"
+#include <linux/greybus.h>
#define SVC_WATCHDOG_PERIOD (2 * HZ)
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 999f80a63bff..e70783e33680 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hyperv
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 574c16004cb2..13d9b141daaa 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
*
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381babc84c..7dfc0431333b 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub PTI output data structures
*
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d983e0..959d4912ec0d 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
struct completion msg_done;
struct clk *sclk;
struct i2c_client *slave;
+ int irq;
};
static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
writeb(0, priv->base + I2C_OFS_SVA0);
+ /*
+ * Wait for interrupt to finish. New slave irqs cannot happen because we
+ * cleared the slave address and, thus, only extension codes will be
+ * detected which do not use the slave ptr.
+ */
+ synchronize_irq(priv->irq);
priv->slave = NULL;
return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
{
struct em_i2c_device *priv;
struct resource *r;
- int irq, ret;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+ priv->irq);
return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b1b8b938d7f4..15f6cde6452f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
}
/* Functions for DMA support */
-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return -ENOMEM;
+ return;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return 0;
+ return;
fail_rx:
dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
- /* return successfully if there is no dma support */
- return ret == -ENODEV ? 0 : ret;
}
static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
i2c_imx->adapter.name);
+ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
/* Init DMA config if supported */
- ret = i2c_imx_dma_request(i2c_imx, phy_addr);
- if (ret < 0)
- goto del_adapter;
+ i2c_imx_dma_request(i2c_imx, phy_addr);
- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
-del_adapter:
- i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d39a4606f72d..531c01100b56 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
enum dma_data_direction dma_direction;
struct reset_control *rstc;
+ int irq;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
WARN_ON(!priv->slave);
+ /* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
+ synchronize_irq(priv->irq);
priv->slave = NULL;
pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
struct i2c_timings i2c_t;
- int irq, ret;
+ int ret;
/* Otherwise logic will break because some bytes must always use PIO */
BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
pm_runtime_put(dev);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
if (ret < 0) {
- dev_err(dev, "cannot get irq %d\n", irq);
+ dev_err(dev, "cannot get irq %d\n", priv->irq);
goto out_pm_disable;
}
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index 868755f82f88..2c21893905a3 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* i2c-stm32.h
*
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0e3c6529fc4c..da073d72f649 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
if (ret)
return ret;
- regval = ret & MAX9611_TEMP_MASK;
+ regval &= MAX9611_TEMP_MASK;
if ((regval > MAX9611_TEMP_MAX_POS &&
regval < MAX9611_TEMP_MIN_NEG) ||
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index e48f15cc9ab5..ff82863cbf42 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
st->buf[0] = st->integer >> 8;
st->buf[1] = 0x40; /* REG12 default */
st->buf[2] = 0x00;
- st->buf[3] = st->fract2 & 0xFF;
- st->buf[4] = st->fract2 >> 7;
- st->buf[5] = st->fract2 >> 15;
+ st->buf[3] = st->fract1 & 0xFF;
+ st->buf[4] = st->fract1 >> 8;
+ st->buf[5] = st->fract1 >> 16;
st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
- ADF4371_FRAC1WORD(st->fract1 >> 23);
+ ADF4371_FRAC1WORD(st->fract1 >> 24);
st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
st->buf[8] = st->mod2 & 0xFF;
st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 45d5164e9574..b79890739a2c 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
int ret;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
mutex_lock(&port_counter->lock);
if (on) {
ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
+ if (!dev->port_data[port].port_counter.hstats)
+ return -EOPNOTSUPP;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..87d40d1ecdde 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
if ((mode == RDMA_COUNTER_MODE_AUTO) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
nlmsg_end(msg, nlh);
ib_device_put(device);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
* prevent any further fault handling on this MR.
*/
ib_umem_notifier_start_account(umem_odp);
- umem_odp->dying = 1;
- /* Make sure that the fact the umem is dying is out before we release
- * all pending page faults. */
- smp_wmb();
complete_all(&umem_odp->notifier_completion);
umem_odp->umem.context->invalidate_range(
umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
event_sub->eventfd =
eventfd_ctx_fdget(redirect_fd);
- if (IS_ERR(event_sub)) {
+ if (IS_ERR(event_sub->eventfd)) {
err = PTR_ERR(event_sub->eventfd);
event_sub->eventfd = NULL;
goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub, *event_sub_tmp;
struct devx_async_event_data *entry, *tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
- mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
/* delete the subscriptions which are related to this FD */
list_for_each_entry_safe(event_sub, event_sub_tmp,
&ev_file->subscribed_events_list, file_list) {
- devx_cleanup_subscription(ev_file->dev, event_sub);
+ devx_cleanup_subscription(dev, event_sub);
if (event_sub->eventfd)
eventfd_ctx_put(event_sub->eventfd);
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
kfree_rcu(event_sub, rcu);
}
- mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
/* free the pending events allocation */
if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
}
uverbs_close_fd(filp);
- put_device(&ev_file->dev->ib_dev.dev);
+ put_device(&dev->ib_dev.dev);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 81da82050d05..1d257d1b3b0d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u32 flags)
{
int npages = 0, current_seq, page_shift, ret, np;
- bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
- implicit = true;
} else {
odp = odp_mr;
}
@@ -682,19 +680,15 @@ next_mr:
out:
if (ret == -EAGAIN) {
- if (implicit || !odp->dying) {
- unsigned long timeout =
- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(
- &odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq, odp->notifiers_count);
- }
- } else {
- /* The MR is being killed, kill the QP as well. */
- ret = -EFAULT;
+ unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(&odp->notifier_completion,
+ timeout)) {
+ mlx5_ib_warn(
+ dev,
+ "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+ current_seq, odp->notifiers_seq,
+ odp->notifiers_count);
}
}
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+ depends on INET && INFINIBAND && LIBCRC32C
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..77b1aabf6ff3 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -214,7 +214,7 @@ struct siw_wqe {
struct siw_cq {
struct ib_cq base_cq;
spinlock_t lock;
- u64 *notify;
+ struct siw_cq_ctrl *notify;
struct siw_cqe *queue;
u32 cq_put;
u32 cq_get;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index d0f140daf659..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
out_err:
siw_cpu_info.num_nodes = 0;
- while (i) {
+ while (--i >= 0)
kfree(siw_cpu_info.tx_valid_cpus[i]);
- siw_cpu_info.tx_valid_cpus[i--] = NULL;
- }
kfree(siw_cpu_info.tx_valid_cpus);
siw_cpu_info.tx_valid_cpus = NULL;
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e27bd5b35b96..0990307c5d2c 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1013,18 +1013,24 @@ out:
*/
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
- u64 cq_notify;
+ u32 cq_notify;
if (!cq->base_cq.comp_handler)
return false;
- cq_notify = READ_ONCE(*cq->notify);
+ /* Read application shared notification state */
+ cq_notify = READ_ONCE(cq->notify->flags);
if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
((cq_notify & SIW_NOTIFY_SOLICITED) &&
(flags & SIW_WQE_SOLICITED))) {
- /* dis-arm CQ */
- smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+ /*
+ * CQ notification is one-shot: Since the
+ * current CQE causes user notification,
+			 * the CQ gets dis-armed and must be re-armed
+ * by the user for a new notification.
+ */
+ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
return true;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..e7f3a2379d9d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->lock);
- cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+ cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
if (udata) {
struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- /* CQ event for next solicited completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+ /*
+			 * Enable CQ event for next solicited completion
+			 * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
else
- /* CQ event for any signalled completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+ /*
+			 * Enable CQ event for any signalled completion
+			 * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
if (flags & IB_CQ_REPORT_MISSED_EVENTS)
return cq->cq_put - cq->cq_get;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..c5c93e48b4db 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
ste_live = true;
break;
case STRTAB_STE_0_CFG_ABORT:
- if (disable_bypass)
- break;
+ BUG_ON(!disable_bypass);
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..d991d40f797f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
- (cur_len + s_length <= max_len)) {
+ (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
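The rewritten condition replaces cur_len + s_length <= max_len with max_len - cur_len >= s_length: with unsigned lengths the addition can wrap around and let the merge check pass when it should not, while the subtraction cannot wrap as long as cur_len has not yet exceeded max_len (which holds here, since cur_len only grows when the check passes). A stand-alone illustration with assumed toy values, not driver code:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_len = 65536, cur_len = 4096;
	unsigned int s_length = UINT_MAX - 100;	/* pathological segment length */

	/* The sum wraps to a small value and wrongly reports "fits" (prints 1). */
	printf("add form: %d\n", cur_len + s_length <= max_len);

	/* The subtraction form cannot wrap and correctly rejects (prints 0). */
	printf("sub form: %d\n", max_len - cur_len >= s_length);

	return 0;
}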
@@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn, off = vma->vm_pgoff;
int ret;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
- if (iova == DMA_MAPPING_ERROR)
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
goto out_free_page;
+ if (iommu_map(domain, iova, msi_addr, size, prot))
+ goto out_free_iova;
+
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 2b25d9c59336..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
tbl_wlk.ctx_entry = context;
m->private = &tbl_wlk;
- if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
pasid_dir_size = get_pasid_dir_size(context);
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdaed2da8a55..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
dmar_domain = to_dmar_domain(domain);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
}
+ dmar_remove_one_dev_info(dev);
get_private_domain_for_dev(dev);
}
@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
/* free the private domain */
if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
domain_exit(info->domain);
free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
+ if (info)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
domain_add_dev_info(si_domain, dev);
dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
ret = iommu_request_dma_domain_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
if (!iommu)
return;
+ dmar_remove_one_dev_info(dev);
+
iommu_group_remove_device(dev);
iommu_device_unlink(&iommu->iommu, dev);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..11ec048929e8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
*/
pixsize = vout->bpp * vout->vrfb_bpp;
- dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
+ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
xt->src_start = vout->buf_phy_addr[vb->i];
xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6abfc8e92fcc..16900357afc2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,7 @@ config PCI_ENDPOINT_TEST
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
+ depends on HAS_IOMEM
help
This option enables support for the Xilinx SDFEC (Soft Decision
Forward Error Correction) driver. This enables a char driver
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0c4894dd9c02..7a8f9d0b71b5 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
- goto free_ctx;
+ kfree(hdev->kernel_ctx);
+ goto mmu_fini;
}
rc = hl_cb_pool_init(hdev);
@@ -1053,8 +1054,6 @@ release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
-free_ctx:
- kfree(hdev->kernel_ctx);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a0e181714891..271c5c8f53b4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
- /* Not needed in Goya */
+ /* The QMANs are on the SRAM so need to copy to IO space */
+ memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
}
static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -3313,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
int rc;
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3344,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
struct packet_lin_dma *user_dma_pkt)
{
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
/*
* WA for HW-23.
@@ -3386,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
dev_dbg(hdev->dev, "WREG32 packet details:\n");
dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
- dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+ dev_dbg(hdev->dev, "value == 0x%x\n",
+ le32_to_cpu(wreg_pkt->value));
if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3428,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev,
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
- void *user_pkt;
+ struct goya_packet *user_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
@@ -3453,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev,
* need to validate here as well because patch_cb() is
* not called in MMU path while this function is called
*/
- rc = goya_validate_wreg32(hdev, parser, user_pkt);
+ rc = goya_validate_wreg32(hdev,
+ parser, (struct packet_wreg32 *) user_pkt);
break;
case PACKET_WREG_BULK:
@@ -3481,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev,
case PACKET_LIN_DMA:
if (is_mmu)
rc = goya_validate_dma_pkt_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
else
rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
break;
case PACKET_MSG_LONG:
@@ -3657,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev,
enum packet_id pkt_id;
u16 pkt_size;
u32 new_pkt_size = 0;
- void *user_pkt, *kernel_pkt;
+ struct goya_packet *user_pkt, *kernel_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- kernel_pkt = (void *) (uintptr_t)
+ kernel_pkt = (struct goya_packet *) (uintptr_t)
(parser->patched_cb->kernel_address +
cb_patched_cur_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
@@ -3680,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev,
switch (pkt_id) {
case PACKET_LIN_DMA:
- rc = goya_patch_dma_packet(hdev, parser, user_pkt,
- kernel_pkt, &new_pkt_size);
+ rc = goya_patch_dma_packet(hdev, parser,
+ (struct packet_lin_dma *) user_pkt,
+ (struct packet_lin_dma *) kernel_pkt,
+ &new_pkt_size);
cb_patched_cur_length += new_pkt_size;
break;
case PACKET_WREG_32:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
- rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+ rc = goya_validate_wreg32(hdev, parser,
+ (struct packet_wreg32 *) kernel_pkt);
break;
case PACKET_WREG_BULK:
@@ -4352,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
size_t total_pkt_size;
long result;
int rc;
+ int irq_num_entries, irq_arr_index;
+ __le32 *goya_irq_arr;
total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
irq_arr_size;
@@ -4369,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
if (!pkt)
return -ENOMEM;
- pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
- memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+ irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
+ pkt->length = cpu_to_le32(irq_num_entries);
+
+	/* We must perform any necessary endianness conversion on the irq
+ * array being passed to the goya hardware
+ */
+ for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
+ irq_arr_index < irq_num_entries ; irq_arr_index++)
+ goya_irq_arr[irq_arr_index] =
+ cpu_to_le32(irq_arr[irq_arr_index]);
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
@@ -5042,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = {
.resume = goya_resume,
.cb_mmap = goya_cb_mmap,
.ring_doorbell = goya_ring_doorbell,
- .flush_pq_write = goya_flush_pq_write,
+ .pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.get_int_queue_base = goya_get_int_queue_base,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index f8c611883dc1..d7f48c9c41cd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev);
void goya_late_fini(struct hl_device *hdev);
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
void goya_update_eq_ci(struct hl_device *hdev, u32 val);
void goya_restore_phase_topology(struct hl_device *hdev);
int goya_context_switch(struct hl_device *hdev, u32 asid);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 6a4c64b97f38..ce83adafcf2d 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -441,7 +441,11 @@ enum hl_pll_frequency {
* @resume: handles IP specific H/W or SW changes for resume.
* @cb_mmap: maps a CB.
* @ring_doorbell: increment PI on a given QMAN.
- * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+ * @pqe_write: Write the PQ entry to the PQ. This is an ASIC-specific
+ * function because the PQs are located in different memory areas
+ * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
+ * writing the PQE must match the destination memory area
+ * properties.
* @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
* dma_alloc_coherent(). This is ASIC function because
* its implementation is not trivial when the driver
@@ -510,7 +514,8 @@ struct hl_asic_funcs {
int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u64 kaddress, phys_addr_t paddress, u32 size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
- void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+ void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
+ struct hl_bd *bd);
void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
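The Goya implementation of this callback earlier in this diff copies with memcpy_toio() because its queues live on SRAM behind an I/O mapping. For contrast, an ASIC whose PQs sat in coherent host memory could satisfy the same callback with a plain copy; the sketch below is hypothetical and not a driver in this tree:

/*
 * Hypothetical ASIC: PQs reside in coherent host memory, so a regular
 * memcpy() is enough and no I/O accessors are needed.
 */
static void example_pqe_write(struct hl_device *hdev, __le64 *pqe,
			      struct hl_bd *bd)
{
	memcpy(pqe, bd, sizeof(struct hl_bd));
}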
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index e3b5517897ea..5f5673b74985 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_bd bd;
- u64 *pi, *pbd = (u64 *) &bd;
+ __le64 *pi;
bd.ctl = 0;
- bd.len = __cpu_to_le32(job->job_cb_size);
- bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+ bd.len = cpu_to_le32(job->job_cb_size);
+ bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
- pi = (u64 *) (uintptr_t) (q->kernel_address +
+ pi = (__le64 *) (uintptr_t) (q->kernel_address +
((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
- pi[0] = pbd[0];
- pi[1] = pbd[1];
-
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
- /* Flush PQ entry write. Relevant only for specific ASICs */
- hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+ hdev->asic_funcs->pqe_write(hdev, pi, &bd);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index a14407b975e4..ef54bad20509 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -52,6 +52,19 @@ enum goya_dma_direction {
#define GOYA_PKT_CTL_MB_SHIFT 31
#define GOYA_PKT_CTL_MB_MASK 0x80000000
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before
+ * sending them off to the hardware.
+ */
+struct goya_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data based on packet type
+ */
+ u8 contents[0];
+};
+
struct packet_nop {
__le32 reserved;
__le32 ctl;
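The 8-byte header above is what goya_validate_cb() and goya_patch_cb() earlier in this diff key off when dispatching to the per-type packet_XXX structures. A minimal hypothetical helper showing that extraction, using the PACKET_HEADER_* constants defined in this header:

/* Hypothetical helper: recover the packet type from a raw goya_packet. */
static enum packet_id example_packet_id(const struct goya_packet *pkt)
{
	u64 header = le64_to_cpu(pkt->header);

	return (enum packet_id)((header & PACKET_HEADER_PACKET_ID_MASK) >>
				PACKET_HEADER_PACKET_ID_SHIFT);
}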
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index ea9f72ff456c..199791b57caf 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
struct hl_cs_job *job;
bool shadow_index_valid;
u16 shadow_index;
- u32 *cq_entry;
- u32 *cq_base;
+ struct hl_cq_entry *cq_entry, *cq_base;
if (hdev->disabled) {
dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
- cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+ cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
while (1) {
- bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+ bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
+ CQ_ENTRY_READY_MASK)
>> CQ_ENTRY_READY_SHIFT);
if (!entry_ready)
break;
- cq_entry = (u32 *) &cq_base[cq->ci];
+ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
- /*
- * Make sure we read CQ entry contents after we've
+ /* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
- shadow_index_valid =
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
- shadow_index = (u16)
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+ shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_MASK)
>> CQ_ENTRY_SHADOW_INDEX_SHIFT);
queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
queue_work(hdev->cq_wq, &job->finish_work);
}
- /*
- * Update ci of the context's queue. There is no
+ /* Update ci of the context's queue. There is no
* need to protect it with spinlock because this update is
* done only inside IRQ and there is a different IRQ per
* queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
queue->ci = hl_queue_inc_ptr(queue->ci);
/* Clear CQ entry ready bit */
- cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+ cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
+ ~CQ_ENTRY_READY_MASK);
cq->ci = hl_cq_inc_ptr(cq->ci);
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc..365fb0cb8dff 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"page list 0x%p of asid %d is still alive\n",
phys_pg_list, ctx->asid);
+ atomic64_sub(phys_pg_list->total_size,
+ &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_list);
idr_remove(&vm->phys_pg_pack_handles, i);
}
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 03cc788511d5..654bdc41fc99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
default:
/* Kept only for backward compatibility purpose. */
params->quad_enable = spansion_quad_enable;
- if (nor->clear_sr_bp)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
break;
}
@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
int err;
if (nor->clear_sr_bp) {
+ if (nor->quad_enable == spansion_quad_enable)
+ nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
+
err = nor->clear_sr_bp(nor);
if (err) {
dev_err(nor->dev,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8f3fbe5ca937..c258a1ce4b28 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_update_formats(ctrl);
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ revalidate_disk(ns->head->disk);
}
#endif
}
@@ -2487,6 +2493,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
if (ret) {
dev_err(ctrl->device,
"failed to register subsystem device.\n");
+ put_device(&subsys->dev);
goto out_unlock;
}
ida_init(&subsys->ns_ida);
@@ -2509,7 +2516,6 @@ out_put_subsystem:
nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
- put_device(&subsys->dev);
return ret;
}
@@ -3571,6 +3577,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
struct nvme_ns *ns, *next;
LIST_HEAD(ns_list);
+ /*
+ * make sure to requeue I/O to all namespaces as these
+ * might result from the scan itself and must complete
+ * for the scan_work to make progress
+ */
+ nvme_mpath_clear_ctrl_paths(ctrl);
+
/* prevent racing with ns scanning */
flush_work(&ctrl->scan_work);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 4f0d0d12744e..888d4543894e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,6 +12,36 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_freeze_queue_start(h->disk->queue);
+}
+
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
@@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
[NVME_ANA_CHANGE] = "change",
};
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ bool changed = false;
int node;
if (!head)
- return;
+ goto out;
for_each_node(node) {
- if (ns == rcu_access_pointer(head->current_path[node]))
+ if (ns == rcu_access_pointer(head->current_path[node])) {
rcu_assign_pointer(head->current_path[node], NULL);
+ changed = true;
+ }
}
+out:
+ return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->scan_lock);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (nvme_mpath_clear_current_path(ns))
+ kblockd_schedule_work(&ns->head->requeue_work);
+ mutex_unlock(&ctrl->scan_lock);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -226,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
return ns;
}
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ switch (ns->ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ /* fallthru */
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
struct bio *bio)
{
@@ -252,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
ret = direct_make_request(bio);
- } else if (!list_empty_careful(&head->list)) {
- dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+ } else if (nvme_available_path(head)) {
+ dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
spin_lock_irq(&head->requeue_lock);
bio_list_add(&head->requeue_list, bio);
spin_unlock_irq(&head->requeue_lock);
} else {
- dev_warn_ratelimited(dev, "no path - failing I/O\n");
+ dev_warn_ratelimited(dev, "no available path - failing I/O\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 26b563f9985b..778b3a0b6adb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -490,6 +490,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
return ctrl->ana_log_buf != NULL;
}
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
@@ -500,7 +503,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -548,7 +552,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -568,6 +576,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index db160cee42ad..6bd9b1033965 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2695,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
- nvme_reset_ctrl_sync(&dev->ctrl);
+ flush_work(&dev->ctrl.reset_work);
flush_work(&dev->ctrl.scan_work);
nvme_put_ctrl(&dev->ctrl);
}
@@ -2761,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+ nvme_reset_ctrl(&dev->ctrl);
nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
@@ -2846,7 +2847,7 @@ static int nvme_resume(struct device *dev)
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
struct nvme_ctrl *ctrl = &ndev->ctrl;
- if (pm_resume_via_firmware() || !ctrl->npss ||
+ if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
nvme_reset_ctrl(ctrl);
return 0;
@@ -2859,6 +2860,8 @@ static int nvme_suspend(struct device *dev)
struct nvme_ctrl *ctrl = &ndev->ctrl;
int ret = -EBUSY;
+ ndev->last_ps = U32_MAX;
+
/*
* The platform does not remove power for a kernel managed suspend so
* use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2869,14 @@ static int nvme_suspend(struct device *dev)
* shutdown. But if the firmware is involved after the suspend or the
* device does not support any non-default power states, shut down the
* device fully.
+ *
+ * If ASPM is not enabled for the device, shut down the device and allow
+ * the PCI bus layer to put it into D3 in order to take the PCIe link
+ * down, so as to allow the platform to achieve its minimum low-power
+ * state (which may not be possible if the link is up).
*/
- if (pm_suspend_via_firmware() || !ctrl->npss) {
+ if (pm_suspend_via_firmware() || !ctrl->npss ||
+ !pcie_aspm_enabled(pdev)) {
nvme_dev_disable(ndev, true);
return 0;
}
@@ -2880,7 +2889,6 @@ static int nvme_suspend(struct device *dev)
ctrl->state != NVME_CTRL_ADMIN_ONLY)
goto unfreeze;
- ndev->last_ps = 0;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a249db528d54..1a6449bc547b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
return ret;
}
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
return;
-
- rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->qp);
+ __nvme_rdma_stop_queue(queue);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
- if (!ret)
+ if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
- else
+ } else {
+ __nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
+ }
return ret;
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index cd52b9f15376..98613a45bd3b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
found:
list_del(&p->entry);
+ nvmet_port_del_ctrls(port, subsys);
nvmet_port_disc_changed(port, subsys);
if (list_empty(&port->subsystems))
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index dad0243c7c96..3a67e244e568 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
u16 status;
switch (errno) {
+ case 0:
+ status = NVME_SC_SUCCESS;
+ break;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->port == port)
+ ctrl->ops->delete_ctrl(ctrl);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
int nvmet_enable_port(struct nvmet_port *port)
{
const struct nvmet_fabrics_ops *ops;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b16dc3981c69..0940c5024a34 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
mutex_lock(&nvme_loop_ports_mutex);
list_del_init(&port->entry);
mutex_unlock(&nvme_loop_ports_mutex);
+
+ /*
+ * Ensure any ctrls that are in the process of being
+ * deleted are in fact deleted before we return
+ * and free the port. This is to prevent active
+ * ctrls from using a port after it's freed.
+ */
+ flush_workqueue(nvme_delete_wq);
}
static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ee66c610739..c51f8dd01dc4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+
int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
*
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
for_each_child_of_node(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
- if (!node_name_cmp(child, overlay_child))
+ if (!node_name_cmp(child, overlay_child)) {
+ of_node_put(overlay_child);
break;
+ }
- if (!overlay_child)
+ if (!overlay_child) {
+ of_node_put(child);
return -EINVAL;
+ }
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
NULL, 0644);
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+ bool ret;
+
+ if (!bridge)
+ return false;
+
+ mutex_lock(&aspm_lock);
+ ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+ mutex_unlock(&aspm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
#ifdef CONFIG_PCIEASPM_DEBUG
static ssize_t link_state_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..a7549ae32542 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
/* This loop sets up all CPUs that are affinitized with a
* irq vector assigned to the driver. All affinitized CPUs
* will get a link to that vectors IRQ and EQ.
+ *
+ * NULL affinity mask handling:
+ * If irq count is greater than one, log an error message.
+ * If the null mask is received for the first irq, find the
+ * first present cpu, and assign the eq index to ensure at
+ * least one EQ is assigned.
*/
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
/* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp)
- continue;
+ if (!maskp) {
+ if (phba->cfg_irq_chann > 1)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3329 No affinity mask found "
+ "for vector %d (%d)\n",
+ idx, phba->cfg_irq_chann);
+ if (!idx) {
+ cpu = cpumask_first(cpu_present_mask);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->eq = idx;
+ cpup->irq = pci_irq_vector(phba->pcidev, idx);
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ }
+ break;
+ }
i = 0;
/* Loop through all CPUs associated with vector idx */
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 3a01cfd70fdc..f518273cfbe3 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig SOUNDWIRE
- bool "SoundWire support"
+ tristate "SoundWire support"
help
SoundWire is a 2-Pin interface with data and clock line ratified
by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
comment "SoundWire Devices"
-config SOUNDWIRE_BUS
- tristate
- select REGMAP_SOUNDWIRE
-
config SOUNDWIRE_CADENCE
tristate
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- select SOUNDWIRE_BUS
depends on X86 && ACPI && SND_SOC
help
SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index fd99a831b92a..45b7e5001653 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -5,7 +5,7 @@
#Bus Objs
soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
#Cadence Objs
soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ff4badc9b3de..60e8bdee5c75 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
#define CDNS_MCP_INTSET 0x4C
-#define CDNS_SDW_SLAVE_STAT 0x50
-#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT 0x50
+#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
#define CDNS_MCP_SLAVE_INTSTAT0 0x54
#define CDNS_MCP_SLAVE_INTSTAT1 0x58
@@ -96,8 +96,8 @@
#define CDNS_MCP_SLAVE_INTMASK0 0x5C
#define CDNS_MCP_SLAVE_INTMASK1 0x60
-#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
#define CDNS_MCP_PORT_INTSTAT 0x64
#define CDNS_MCP_PDI_STAT 0x6C
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c96a01eef6c..fbdc33874780 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,12 +112,12 @@ source "drivers/staging/gasket/Kconfig"
source "drivers/staging/axis-fifo/Kconfig"
-source "drivers/staging/erofs/Kconfig"
-
source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/isdn/Kconfig"
+source "drivers/staging/exfat/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fcaac9693b83..ca13f87b1e1b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
-obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_ISDN_CAPI) += isdn/
+obj-$(CONFIG_EXFAT_FS) += exfat/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index fbf015cc6d62..767dd98fd92d 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -6,8 +6,6 @@ TODO:
ion/
- - Add dt-bindings for remaining heaps (chunk and carveout heaps). This would
- involve putting appropriate bindings in a memory node for Ion to find.
- Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
- Better test framework (integration with VGEM was suggested)
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee91300..caf4d4df4bd3 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
unsigned int flags)
{
- int divider, base, prescale;
+ unsigned int divider, base, prescale;
- /* This function needs improvment */
+ /* This function needs improvement */
/* Don't know if divider==0 works. */
for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
}
prescale = 15;
- base = timer_base * (1 << prescale);
+ base = timer_base * (prescale + 1);
divider = 65535;
*nanosec = divider * base;
return (prescale << 16) | (divider);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index c175227009f1..f98e3ae27bff 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -596,7 +596,7 @@ static int ni_request_ao_mite_channel(struct comedi_device *dev)
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
- "failed to reserve mite dma channel for analog outut\n");
+ "failed to reserve mite dma channel for analog output\n");
return -EBUSY;
}
mite_chan->dir = COMEDI_OUTPUT;
diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
deleted file mode 100644
index 0eab600ca7ca..000000000000
--- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt
+++ /dev/null
@@ -1,223 +0,0 @@
-Overview
-========
-
-EROFS file-system stands for Enhanced Read-Only File System. Different
-from other read-only file systems, it aims to be designed for flexibility,
-scalability, but be kept simple and high performance.
-
-It is designed as a better filesystem solution for the following scenarios:
- - read-only storage media or
-
- - part of a fully trusted read-only solution, which means it needs to be
- immutable and bit-for-bit identical to the official golden image for
- their releases due to security and other considerations and
-
- - hope to save some extra storage space with guaranteed end-to-end performance
- by using reduced metadata and transparent file compression, especially
- for those embedded devices with limited memory (ex, smartphone);
-
-Here is the main features of EROFS:
- - Little endian on-disk design;
-
- - Currently 4KB block size (nobh) and therefore maximum 16TB address space;
-
- - Metadata & data could be mixed by design;
-
- - 2 inode versions for different requirements:
- v1 v2
- Inode metadata size: 32 bytes 64 bytes
- Max file size: 4 GB 16 EB (also limited by max. vol size)
- Max uids/gids: 65536 4294967296
- File creation time: no yes (64 + 32-bit timestamp)
- Max hardlinks: 65536 4294967296
- Metadata reserved: 4 bytes 14 bytes
-
- - Support extended attributes (xattrs) as an option;
-
- - Support xattr inline and tail-end data inline for all files;
-
- - Support POSIX.1e ACLs by using xattrs;
-
- - Support transparent file compression as an option:
- LZ4 algorithm with 4 KB fixed-output compression for high performance;
-
-The following git tree provides the file system user-space tools under
-development (ex, formatting tool mkfs.erofs):
->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
-
-Bugs and patches are welcome, please kindly help us and send to the following
-linux-erofs mailing list:
->> linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
-
-Note that EROFS is still working in progress as a Linux staging driver,
-Cc the staging mailing list as well is highly recommended:
->> Linux Driver Project Developer List <devel@driverdev.osuosl.org>
-
-Mount options
-=============
-
-fault_injection=%d Enable fault injection in all supported types with
- specified injection rate. Supported injection type:
- Type_Name Type_Value
- FAULT_KMALLOC 0x000000001
- FAULT_READ_IO 0x000000002
-(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled
- by default if CONFIG_EROFS_FS_XATTR is selected.
-(no)acl Setup POSIX Access Control List. Note: acl is enabled
- by default if CONFIG_EROFS_FS_POSIX_ACL is selected.
-cache_strategy=%s Select a strategy for cached decompression from now on:
- disabled: In-place I/O decompression only;
- readahead: Cache the last incomplete compressed physical
- cluster for further reading. It still does
- in-place I/O decompression for the rest
- compressed physical clusters;
- readaround: Cache the both ends of incomplete compressed
- physical clusters for further reading.
- It still does in-place I/O decompression
- for the rest compressed physical clusters.
-
-Module parameters
-=================
-use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0).
-
-On-disk details
-===============
-
-Summary
--------
-Different from other read-only file systems, an EROFS volume is designed
-to be as simple as possible:
-
- |-> aligned with the block size
- ____________________________________________________________
- | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data |
- |_|__|_|_____|__________|_____|______|__________|_____|______|
- 0 +1K
-
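
As a hedged illustration of the layout above (not part of the kernel sources
removed by this patch), a minimal user-space sketch that reads the superblock
at the 1K offset and checks the EROFS magic; the constants come from
erofs_fs.h further below, while the helper name, the file handling and the
little-endian host assumption are illustrative only:

/* sketch: read the 128-byte superblock at offset 1024 and check the magic */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EROFS_SUPER_OFFSET	1024
#define EROFS_SUPER_MAGIC_V1	0xE0F5E1E2

static int check_erofs_magic(const char *image_path)
{
	unsigned char sb[128];	/* struct erofs_super_block is 128 bytes */
	uint32_t magic;
	FILE *f = fopen(image_path, "rb");

	if (!f)
		return -1;
	if (fseek(f, EROFS_SUPER_OFFSET, SEEK_SET) ||
	    fread(sb, sizeof(sb), 1, f) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	memcpy(&magic, sb, sizeof(magic));	/* le32 magic field at offset 0 */
	return magic == EROFS_SUPER_MAGIC_V1 ? 0 : -1;
}
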
-All data areas should be aligned with the block size, but metadata areas
-may not be. All metadata can now be observed in two different spaces (views):
- 1. Inode metadata space
- Each valid inode should be aligned with an inode slot, which is a fixed
- value (32 bytes) and designed to be kept in line with v1 inode size.
-
- Each inode can be directly found with the following formula:
- inode offset = meta_blkaddr * block_size + 32 * nid
-
- |-> aligned with 8B
- |-> followed closely
- + meta_blkaddr blocks |-> another slot
- _____________________________________________________________________
- | ... | inode | xattrs | extents | data inline | ... | inode ...
- |________|_______|(optional)|(optional)|__(optional)_|_____|__________
- |-> aligned with the inode slot size
- . .
- . .
- . .
- . .
- . .
- . .
- .____________________________________________________|-> aligned with 4B
- | xattr_ibody_header | shared xattrs | inline xattrs |
- |____________________|_______________|_______________|
- |-> 12 bytes <-|->x * 4 bytes<-| .
- . . .
- . . .
- . . .
- ._______________________________.______________________.
- | id | id | id | id | ... | id | ent | ... | ent| ... |
- |____|____|____|____|______|____|_____|_____|____|_____|
- |-> aligned with 4B
- |-> aligned with 4B
-
- An inode can be 32 or 64 bytes; the two versions can be distinguished by a
- common field which all inode versions have -- i_advise:
-
- __________________ __________________
- | i_advise | | i_advise |
- |__________________| |__________________|
- | ... | | ... |
- | | | |
- |__________________| 32 bytes | |
- | |
- |__________________| 64 bytes
-
- Xattrs, extents and inline data follow the corresponding inode with
- proper alignment, and they could be optional for different data mappings;
- _currently_ there are 3 valid data mappings supported in total:
-
- 1) flat file data without data inline (no extent);
- 2) fixed-output size data compression (must have extents);
- 3) flat file data with tail-end data inline (no extent);
-
- The size of the optional xattrs is indicated by i_xattr_icount in the inode
- header. Large xattrs or xattrs shared by many different files can be
- stored in the shared xattr metadata rather than inlined right after the inode.
-
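
As a minimal sketch of the inode formula above (user-space C rather than
kernel code; the 4KB block size and 32-byte inode slot are taken from this
document and the helper name is made up for illustration):

/* sketch: locate an on-disk inode from meta_blkaddr and nid */
#include <stdint.h>

#define EROFS_BLKSIZ		4096	/* current fixed 4KB block size */
#define EROFS_INODE_SLOT_SIZE	32	/* kept in line with the v1 inode size */

static inline uint64_t erofs_inode_offset(uint32_t meta_blkaddr, uint64_t nid)
{
	return (uint64_t)meta_blkaddr * EROFS_BLKSIZ +
		EROFS_INODE_SLOT_SIZE * nid;
}
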
- 2. Shared xattrs metadata space
- The shared xattr space is similar to the inode space above; it starts at
- a specific block indicated by xattr_blkaddr and is organized one entry
- after another with proper alignment.
-
- Each shared xattr can also be directly found with the following formula:
- xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
-
- |-> aligned by 4 bytes
- + xattr_blkaddr blocks |-> aligned with 4 bytes
- _________________________________________________________________________
- | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
- |________|_____________|_____________|_____|______________|_______________
-
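
Similarly, a minimal sketch of the shared xattr formula above (user-space C;
the 4KB block size is assumed as elsewhere in this document and the helper
name is illustrative only):

/* sketch: locate a shared xattr entry from xattr_blkaddr and xattr_id */
#include <stdint.h>

#define EROFS_BLKSIZ	4096	/* current fixed 4KB block size */

static inline uint64_t erofs_shared_xattr_offset(uint32_t xattr_blkaddr,
						 uint32_t xattr_id)
{
	return (uint64_t)xattr_blkaddr * EROFS_BLKSIZ + 4 * xattr_id;
}
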
-Directories
------------
-All directories are now organized in a compact on-disk format. Note that
-each directory block is divided into index and name areas in order to support
-random file lookup, and all directory entries are _strictly_ recorded in
-alphabetical order to support an improved prefix binary search
-algorithm (refer to the related source code for details).
-
- ___________________________
- / |
- / ______________|________________
- / / | nameoff1 | nameoffN-1
- ____________.______________._______________v________________v__________
-| dirent | dirent | ... | dirent | filename | filename | ... | filename |
-|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
- \ ^
- \ | * could have
- \ | trailing '\0'
- \________________________| nameoff0
-
- Directory block
-
-Note that apart from the offset of the first filename, nameoff0 also indicates
-the total number of directory entries in this block since there is no need to
-introduce another on-disk field at all.
-
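
Since each on-disk dirent is a fixed 12 bytes (see erofs_fs.h), the entry
count can be derived from nameoff0 as sketched below (user-space C; the
mirrored struct and helper name are illustrative, not the kernel API):

/* sketch: derive the number of dirents in a directory block from nameoff0 */
#include <stdint.h>

struct erofs_dirent_disk {		/* mirrors the 12-byte on-disk dirent */
	uint64_t nid;			/* node number */
	uint16_t nameoff;		/* start offset of the file name */
	uint8_t  file_type;
	uint8_t  reserved;
} __attribute__((packed));

static inline unsigned int erofs_dirent_count(uint16_t nameoff0)
{
	/* nameoff0 is also the end of the index area, so it encodes the count */
	return nameoff0 / sizeof(struct erofs_dirent_disk);
}
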
-Compression
------------
-Currently, EROFS supports 4KB fixed-output clustersize transparent file
-compression, as illustrated below:
-
- |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
- clusterofs clusterofs clusterofs
- | | | logical data
-_________v_______________________________v_____________________v_______________
-... | . | | . | | . | ...
-____|____.________|_____________|________.____|_____________|__.__________|____
- |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
- size size size size size
- . . . .
- . . . .
- . . . .
- _______._____________._____________._____________._____________________
- ... | | | | ... physical data
- _______|_____________|_____________|_____________|_____________________
- |-> cluster <-|-> cluster <-|-> cluster <-|
- size size size
-
-Currently each on-disk physical cluster can contain at most 4KB of
-(un)compressed data. For each logical cluster, there is a corresponding
-on-disk index to describe its cluster type, physical cluster address, etc.
-
-See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
-
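
To make the cluster terminology above concrete, a hedged sketch of how a byte
position in a file maps to a 4KB logical cluster and an offset inside it
(user-space C; the actual on-disk index lookup lives in zmap.c and is not
reproduced here):

/* sketch: split a file position into a logical cluster number and offset */
#include <stdint.h>

#define EROFS_LOGICAL_CLUSTERSIZE	4096	/* current 4KB logical cluster */

static inline void erofs_pos_to_lcn(uint64_t pos, uint64_t *lcn,
				    uint32_t *clusterofs)
{
	*lcn = pos / EROFS_LOGICAL_CLUSTERSIZE;	/* which on-disk index to consult */
	*clusterofs = pos % EROFS_LOGICAL_CLUSTERSIZE;
}
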
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
deleted file mode 100644
index 16316d1adca3..000000000000
--- a/drivers/staging/erofs/Kconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config EROFS_FS
- tristate "EROFS filesystem support"
- depends on BLOCK
- help
- EROFS (Enhanced Read-Only File System) is a lightweight
- read-only file system with modern designs (e.g. page-sized
- blocks, inline xattrs/data, etc.) for scenarios with
- high-performance read-only requirements, e.g. Android OS
- for mobile phones and LIVECDs.
-
- It also provides fixed-sized output compression support,
- which improves storage density and keeps relatively high
- compression ratios, making it more useful for achieving high
- performance on embedded devices with limited memory.
-
- If unsure, say N.
-
-config EROFS_FS_DEBUG
- bool "EROFS debugging feature"
- depends on EROFS_FS
- help
- Print debugging messages and enable more BUG_ONs which check
- filesystem consistency and find potential issues aggressively;
- this can be used for an Android eng build, for example.
-
- For daily use, say N.
-
-config EROFS_FAULT_INJECTION
- bool "EROFS fault injection facility"
- depends on EROFS_FS
- help
- Test EROFS to inject faults such as ENOMEM, EIO, and so on.
- If unsure, say N.
-
-config EROFS_FS_XATTR
- bool "EROFS extended attributes"
- depends on EROFS_FS
- default y
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
-
- If unsure, say N.
-
-config EROFS_FS_POSIX_ACL
- bool "EROFS Access Control Lists"
- depends on EROFS_FS_XATTR
- select FS_POSIX_ACL
- default y
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
- If you don't know what Access Control Lists are, say N.
-
-config EROFS_FS_SECURITY
- bool "EROFS Security Labels"
- depends on EROFS_FS_XATTR
- default y
- help
- Security labels provide an access control facility to support Linux
- Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
- Linux. This option enables an extended attribute handler for file
- security labels in the erofs filesystem, which requires the extended
- attribute support to be enabled in advance.
-
- If you are not using a security module, say N.
-
-config EROFS_FS_ZIP
- bool "EROFS Data Compression Support"
- depends on EROFS_FS
- select LZ4_DECOMPRESS
- default y
- help
- Enable fixed-sized output compression for EROFS.
-
- If you don't want to enable the compression feature, say N.
-
-config EROFS_FS_CLUSTER_PAGE_LIMIT
- int "EROFS Cluster Pages Hard Limit"
- depends on EROFS_FS_ZIP
- range 1 256
- default "1"
- help
- Indicates the maximum number of pages of a compressed
- physical cluster.
-
- For example, if files in an image were compressed
- in 8k units, the hard limit should not be configured
- to less than 2. Otherwise, the image will be refused
- when mounting on this kernel.
-
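
The relation described in the help text above can be written down as a small
sketch (user-space C; a 4KB page size is assumed and the helper name is
illustrative only):

/* sketch: smallest usable cluster page limit for a given compressed cluster size */
#include <stdint.h>

#define ASSUMED_PAGE_SIZE	4096	/* 4KB pages assumed */

static inline unsigned int min_cluster_page_limit(uint32_t clustersize)
{
	/* e.g. images compressed in 8k units need a limit of at least 2 */
	return (clustersize + ASSUMED_PAGE_SIZE - 1) / ASSUMED_PAGE_SIZE;
}
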
diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile
deleted file mode 100644
index 5cdae21cb5af..000000000000
--- a/drivers/staging/erofs/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-EROFS_VERSION = "1.0pre1"
-
-ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
-
-obj-$(CONFIG_EROFS_FS) += erofs.o
-# staging requirement: to be self-contained in its own directory
-ccflags-y += -I $(srctree)/$(src)/include
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
-erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
-
diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO
deleted file mode 100644
index a8608b2f72bd..000000000000
--- a/drivers/staging/erofs/TODO
+++ /dev/null
@@ -1,46 +0,0 @@
-
-EROFS is still a work in progress, thus it is not suitable
-for all production uses. Play at your own risk :)
-
-TODO List:
- - add the missing error handling code
- (mainly in the xattr and decompression submodules);
-
- - finalize erofs ondisk format design (which means that
- minor on-disk revisions could happen later);
-
- - documentation and detailed technical analysis;
-
- - general code review and clean up
- (including confusing variable names and code snippets);
-
- - support selecting larger compressed cluster sizes
- (currently erofs only works as expected with the page-sized
- compressed cluster configuration, usually 4KB);
-
- - support more lossless data compression algorithms
- in addition to the LZ4 algorithm in the VLE approach;
-
- - data deduplication and other useful features.
-
-The following git tree provides the file system user-space
-tools under development (ex, formatting tool mkfs.erofs):
->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
-
-The open-source development of erofs-utils is still at an early stage.
-Contact the original author Li Guifu <bluce.liguifu@huawei.com> and
-the co-maintainer Fang Wei <fangwei1@huawei.com> for the latest news
-and more details.
-
-Code, suggestions, etc. are welcome. Please feel free to
-ask questions and send patches,
-
-To:
- linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
- Gao Xiang <gaoxiang25@huawei.com>
- Chao Yu <yuchao0@huawei.com>
-
-Cc: (for linux-kernel upstream patches)
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- linux-staging mailing list <devel@driverdev.osuosl.org>
-
diff --git a/drivers/staging/erofs/compress.h b/drivers/staging/erofs/compress.h
deleted file mode 100644
index 043013f9ef1b..000000000000
--- a/drivers/staging/erofs/compress.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/staging/erofs/compress.h
- *
- * Copyright (C) 2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_COMPRESS_H
-#define __EROFS_FS_COMPRESS_H
-
-#include "internal.h"
-
-enum {
- Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
- Z_EROFS_COMPRESSION_RUNTIME_MAX
-};
-
-struct z_erofs_decompress_req {
- struct super_block *sb;
- struct page **in, **out;
-
- unsigned short pageofs_out;
- unsigned int inputsize, outputsize;
-
- /* indicate the algorithm will be used for decompression */
- unsigned int alg;
- bool inplace_io, partial_decoding;
-};
-
-/*
- * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
- * used to mark temporarily allocated pages, to tell them apart from
- * other file/cached pages and NULL-mapping pages.
- */
-#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D)
-
-/* check if a page is marked as staging */
-static inline bool z_erofs_page_is_staging(struct page *page)
-{
- return page->mapping == Z_EROFS_MAPPING_STAGING;
-}
-
-static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
- struct page *page)
-{
- if (!z_erofs_page_is_staging(page))
- return false;
-
- /* staging pages should not be used by others at the same time */
- if (page_ref_count(page) > 1)
- put_page(page);
- else
- list_add(&page->lru, pagepool);
- return true;
-}
-
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool);
-
-#endif
-
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
deleted file mode 100644
index 72c4b4c5296b..000000000000
--- a/drivers/staging/erofs/data.c
+++ /dev/null
@@ -1,425 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/data.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "internal.h"
-#include <linux/prefetch.h>
-
-#include <trace/events/erofs.h>
-
-static inline void read_endio(struct bio *bio)
-{
- struct super_block *const sb = bio->bi_private;
- struct bio_vec *bvec;
- blk_status_t err = bio->bi_status;
- struct bvec_iter_all iter_all;
-
- if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
- erofs_show_injection_info(FAULT_READ_IO);
- err = BLK_STS_IOERR;
- }
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
-
- /* page is already locked */
- DBG_BUGON(PageUptodate(page));
-
- if (unlikely(err))
- SetPageError(page);
- else
- SetPageUptodate(page);
-
- unlock_page(page);
- /* page could be reclaimed now */
- }
- bio_put(bio);
-}
-
-/* prio -- true is used for dir */
-struct page *__erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio, bool nofail)
-{
- struct inode *const bd_inode = sb->s_bdev->bd_inode;
- struct address_space *const mapping = bd_inode->i_mapping;
- /* prefer retrying in the allocator to blindly looping below */
- const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
- (nofail ? __GFP_NOFAIL : 0);
- unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
- struct page *page;
- int err;
-
-repeat:
- page = find_or_create_page(mapping, blkaddr, gfp);
- if (unlikely(!page)) {
- DBG_BUGON(nofail);
- return ERR_PTR(-ENOMEM);
- }
- DBG_BUGON(!PageLocked(page));
-
- if (!PageUptodate(page)) {
- struct bio *bio;
-
- bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
- if (IS_ERR(bio)) {
- DBG_BUGON(nofail);
- err = PTR_ERR(bio);
- goto err_out;
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (unlikely(err != PAGE_SIZE)) {
- err = -EFAULT;
- goto err_out;
- }
-
- __submit_bio(bio, REQ_OP_READ,
- REQ_META | (prio ? REQ_PRIO : 0));
-
- lock_page(page);
-
- /* this page has been truncated by others */
- if (unlikely(page->mapping != mapping)) {
-unlock_repeat:
- unlock_page(page);
- put_page(page);
- goto repeat;
- }
-
- /* more likely a read error */
- if (unlikely(!PageUptodate(page))) {
- if (io_retries) {
- --io_retries;
- goto unlock_repeat;
- }
- err = -EIO;
- goto err_out;
- }
- }
- return page;
-
-err_out:
- unlock_page(page);
- put_page(page);
- return ERR_PTR(err);
-}
-
-static int erofs_map_blocks_flatmode(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- int err = 0;
- erofs_blk_t nblocks, lastblk;
- u64 offset = map->m_la;
- struct erofs_vnode *vi = EROFS_V(inode);
-
- trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
-
- nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
- lastblk = nblocks - is_inode_flat_inline(inode);
-
- if (unlikely(offset >= inode->i_size)) {
- /* leave out-of-bound access unmapped */
- map->m_flags = 0;
- map->m_plen = 0;
- goto out;
- }
-
- /* there is no hole in flatmode */
- map->m_flags = EROFS_MAP_MAPPED;
-
- if (offset < blknr_to_addr(lastblk)) {
- map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
- map->m_plen = blknr_to_addr(lastblk) - offset;
- } else if (is_inode_flat_inline(inode)) {
- /* 2 - inode inline B: inode, [xattrs], inline last blk... */
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
- map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
- vi->xattr_isize + erofs_blkoff(map->m_la);
- map->m_plen = inode->i_size - offset;
-
- /* inline data should be located in one meta block */
- if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
- errln("inline data cross block boundary @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- err = -EFSCORRUPTED;
- goto err_out;
- }
-
- map->m_flags |= EROFS_MAP_META;
- } else {
- errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
- vi->nid, inode->i_size, map->m_la);
- DBG_BUGON(1);
- err = -EIO;
- goto err_out;
- }
-
-out:
- map->m_llen = map->m_plen;
-
-err_out:
- trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
- return err;
-}
-
-int erofs_map_blocks(struct inode *inode,
- struct erofs_map_blocks *map, int flags)
-{
- if (unlikely(is_inode_layout_compression(inode))) {
- int err = z_erofs_map_blocks_iter(inode, map, flags);
-
- if (map->mpage) {
- put_page(map->mpage);
- map->mpage = NULL;
- }
- return err;
- }
- return erofs_map_blocks_flatmode(inode, map, flags);
-}
-
-static inline struct bio *erofs_read_raw_page(struct bio *bio,
- struct address_space *mapping,
- struct page *page,
- erofs_off_t *last_block,
- unsigned int nblocks,
- bool ra)
-{
- struct inode *const inode = mapping->host;
- struct super_block *const sb = inode->i_sb;
- erofs_off_t current_block = (erofs_off_t)page->index;
- int err;
-
- DBG_BUGON(!nblocks);
-
- if (PageUptodate(page)) {
- err = 0;
- goto has_updated;
- }
-
- /* note that for readpage case, bio also equals to NULL */
- if (bio &&
- /* not continuous */
- *last_block + 1 != current_block) {
-submit_bio_retry:
- __submit_bio(bio, REQ_OP_READ, 0);
- bio = NULL;
- }
-
- if (!bio) {
- struct erofs_map_blocks map = {
- .m_la = blknr_to_addr(current_block),
- };
- erofs_blk_t blknr;
- unsigned int blkoff;
-
- err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
- if (unlikely(err))
- goto err_out;
-
- /* zero out the holed page */
- if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
-
- /* imply err = 0, see erofs_map_blocks */
- goto has_updated;
- }
-
- /* for RAW access mode, m_plen must be equal to m_llen */
- DBG_BUGON(map.m_plen != map.m_llen);
-
- blknr = erofs_blknr(map.m_pa);
- blkoff = erofs_blkoff(map.m_pa);
-
- /* deal with inline page */
- if (map.m_flags & EROFS_MAP_META) {
- void *vsrc, *vto;
- struct page *ipage;
-
- DBG_BUGON(map.m_plen > PAGE_SIZE);
-
- ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
-
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto err_out;
- }
-
- vsrc = kmap_atomic(ipage);
- vto = kmap_atomic(page);
- memcpy(vto, vsrc + blkoff, map.m_plen);
- memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
- kunmap_atomic(vto);
- kunmap_atomic(vsrc);
- flush_dcache_page(page);
-
- SetPageUptodate(page);
- /* TODO: could we unlock the page earlier? */
- unlock_page(ipage);
- put_page(ipage);
-
- /* imply err = 0, see erofs_map_blocks */
- goto has_updated;
- }
-
- /* pa must be block-aligned for raw reading */
- DBG_BUGON(erofs_blkoff(map.m_pa));
-
- /* max # of continuous pages */
- if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
- nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
- if (nblocks > BIO_MAX_PAGES)
- nblocks = BIO_MAX_PAGES;
-
- bio = erofs_grab_bio(sb, blknr, nblocks, sb,
- read_endio, false);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- bio = NULL;
- goto err_out;
- }
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- /* out of the extent or bio is full */
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
-
- *last_block = current_block;
-
- /* shift in advance in case it is followed by too many gaps */
- if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
- /* err should be reset to 0 after submitting */
- err = 0;
- goto submit_bio_out;
- }
-
- return bio;
-
-err_out:
- /* for sync reading, set page error immediately */
- if (!ra) {
- SetPageError(page);
- ClearPageUptodate(page);
- }
-has_updated:
- unlock_page(page);
-
- /* if updated manually, continuous pages have a gap */
- if (bio)
-submit_bio_out:
- __submit_bio(bio, REQ_OP_READ, 0);
-
- return unlikely(err) ? ERR_PTR(err) : NULL;
-}
-
-/*
- * since we don't have write or truncate flows, no inode
- * locking needs to be held at the moment.
- */
-static int erofs_raw_access_readpage(struct file *file, struct page *page)
-{
- erofs_off_t last_block;
- struct bio *bio;
-
- trace_erofs_readpage(page, true);
-
- bio = erofs_read_raw_page(NULL, page->mapping,
- page, &last_block, 1, false);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
- return 0;
-}
-
-static int erofs_raw_access_readpages(struct file *filp,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned int nr_pages)
-{
- erofs_off_t last_block;
- struct bio *bio = NULL;
- gfp_t gfp = readahead_gfp_mask(mapping);
- struct page *page = list_last_entry(pages, struct page, lru);
-
- trace_erofs_readpages(mapping->host, page, nr_pages, true);
-
- for (; nr_pages; --nr_pages) {
- page = list_entry(pages->prev, struct page, lru);
-
- prefetchw(&page->flags);
- list_del(&page->lru);
-
- if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
- bio = erofs_read_raw_page(bio, mapping, page,
- &last_block, nr_pages, true);
-
- /* all the page errors are ignored when readahead */
- if (IS_ERR(bio)) {
- pr_err("%s, readahead error at page %lu of nid %llu\n",
- __func__, page->index,
- EROFS_V(mapping->host)->nid);
-
- bio = NULL;
- }
- }
-
- /* pages could still be locked */
- put_page(page);
- }
- DBG_BUGON(!list_empty(pages));
-
- /* the rare case (end in gaps) */
- if (unlikely(bio))
- __submit_bio(bio, REQ_OP_READ, 0);
- return 0;
-}
-
-static int erofs_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
-{
- struct erofs_map_blocks map = {
- .m_la = iblock << 9,
- };
- int err;
-
- err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
- if (err)
- return err;
-
- if (map.m_flags & EROFS_MAP_MAPPED)
- bh->b_blocknr = erofs_blknr(map.m_pa);
-
- return err;
-}
-
-static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
-{
- struct inode *inode = mapping->host;
-
- if (is_inode_flat_inline(inode)) {
- erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;
-
- if (block >> LOG_SECTORS_PER_BLOCK >= blks)
- return 0;
- }
-
- return generic_block_bmap(mapping, block, erofs_get_block);
-}
-
-/* for uncompressed (aligned) files and raw access for other files */
-const struct address_space_operations erofs_raw_access_aops = {
- .readpage = erofs_raw_access_readpage,
- .readpages = erofs_raw_access_readpages,
- .bmap = erofs_bmap,
-};
-
diff --git a/drivers/staging/erofs/decompressor.c b/drivers/staging/erofs/decompressor.c
deleted file mode 100644
index 32a811ac704a..000000000000
--- a/drivers/staging/erofs/decompressor.c
+++ /dev/null
@@ -1,360 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/decompressor.c
- *
- * Copyright (C) 2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "compress.h"
-#include <linux/module.h>
-#include <linux/lz4.h>
-
-#ifndef LZ4_DISTANCE_MAX /* history window size */
-#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
-#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
-#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
-#endif
-
-struct z_erofs_decompressor {
- /*
- * if destpages have sparse (missing) pages, fill them with bounce pages.
- * it also checks whether destpages indicate contiguous physical memory.
- */
- int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool);
- int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
- char *name;
-};
-
-static bool use_vmap;
-module_param(use_vmap, bool, 0444);
-MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
-
-static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nr =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
- unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
- BITS_PER_LONG)] = { 0 };
- void *kaddr = NULL;
- unsigned int i, j, top;
-
- top = 0;
- for (i = j = 0; i < nr; ++i, ++j) {
- struct page *const page = rq->out[i];
- struct page *victim;
-
- if (j >= LZ4_MAX_DISTANCE_PAGES)
- j = 0;
-
- /* 'valid' bounced can only be tested after a complete round */
- if (test_bit(j, bounced)) {
- DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
- DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
- availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
- }
-
- if (page) {
- __clear_bit(j, bounced);
- if (kaddr) {
- if (kaddr + PAGE_SIZE == page_address(page))
- kaddr += PAGE_SIZE;
- else
- kaddr = NULL;
- } else if (!i) {
- kaddr = page_address(page);
- }
- continue;
- }
- kaddr = NULL;
- __set_bit(j, bounced);
-
- if (top) {
- victim = availables[--top];
- get_page(victim);
- } else {
- victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
- if (unlikely(!victim))
- return -ENOMEM;
- victim->mapping = Z_EROFS_MAPPING_STAGING;
- }
- rq->out[i] = victim;
- }
- return kaddr ? 1 : 0;
-}
-
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
- u8 *src, unsigned int pageofs_in)
-{
- /*
- * if in-place decompression is ongoing, those decompressed
- * pages should be copied in order to avoid being overlapped.
- */
- struct page **in = rq->in;
- u8 *const tmp = erofs_get_pcpubuf(0);
- u8 *tmpp = tmp;
- unsigned int inlen = rq->inputsize - pageofs_in;
- unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
-
- while (tmpp < tmp + inlen) {
- if (!src)
- src = kmap_atomic(*in);
- memcpy(tmpp, src + pageofs_in, count);
- kunmap_atomic(src);
- src = NULL;
- tmpp += count;
- pageofs_in = 0;
- count = PAGE_SIZE;
- ++in;
- }
- return tmp;
-}
-
-static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
-{
- unsigned int inputmargin, inlen;
- u8 *src;
- bool copied, support_0padding;
- int ret;
-
- if (rq->inputsize > PAGE_SIZE)
- return -EOPNOTSUPP;
-
- src = kmap_atomic(*rq->in);
- inputmargin = 0;
- support_0padding = false;
-
- /* decompression inplace is only safe when 0padding is enabled */
- if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) {
- support_0padding = true;
-
- while (!src[inputmargin & ~PAGE_MASK])
- if (!(++inputmargin & ~PAGE_MASK))
- break;
-
- if (inputmargin >= rq->inputsize) {
- kunmap_atomic(src);
- return -EIO;
- }
- }
-
- copied = false;
- inlen = rq->inputsize - inputmargin;
- if (rq->inplace_io) {
- const uint oend = (rq->pageofs_out +
- rq->outputsize) & ~PAGE_MASK;
- const uint nr = PAGE_ALIGN(rq->pageofs_out +
- rq->outputsize) >> PAGE_SHIFT;
-
- if (rq->partial_decoding || !support_0padding ||
- rq->out[nr - 1] != rq->in[0] ||
- rq->inputsize - oend <
- LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
- src = generic_copy_inplace_data(rq, src, inputmargin);
- inputmargin = 0;
- copied = true;
- }
- }
-
- ret = LZ4_decompress_safe_partial(src + inputmargin, out,
- inlen, rq->outputsize,
- rq->outputsize);
- if (ret < 0) {
- errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]",
- __func__, src + inputmargin, inlen, inputmargin,
- out, rq->outputsize);
- WARN_ON(1);
- print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
- 16, 1, src + inputmargin, inlen, true);
- print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
- 16, 1, out, rq->outputsize, true);
- ret = -EIO;
- }
-
- if (copied)
- erofs_put_pcpubuf(src);
- else
- kunmap_atomic(src);
- return ret;
-}
-
-static struct z_erofs_decompressor decompressors[] = {
- [Z_EROFS_COMPRESSION_SHIFTED] = {
- .name = "shifted"
- },
- [Z_EROFS_COMPRESSION_LZ4] = {
- .prepare_destpages = lz4_prepare_destpages,
- .decompress = lz4_decompress,
- .name = "lz4"
- },
-};
-
-static void copy_from_pcpubuf(struct page **out, const char *dst,
- unsigned short pageofs_out,
- unsigned int outputsize)
-{
- const char *end = dst + outputsize;
- const unsigned int righthalf = PAGE_SIZE - pageofs_out;
- const char *cur = dst - pageofs_out;
-
- while (cur < end) {
- struct page *const page = *out++;
-
- if (page) {
- char *buf = kmap_atomic(page);
-
- if (cur >= dst) {
- memcpy(buf, cur, min_t(uint, PAGE_SIZE,
- end - cur));
- } else {
- memcpy(buf + pageofs_out, cur + pageofs_out,
- min_t(uint, righthalf, end - cur));
- }
- kunmap_atomic(buf);
- }
- cur += PAGE_SIZE;
- }
-}
-
-static void *erofs_vmap(struct page **pages, unsigned int count)
-{
- int i = 0;
-
- if (use_vmap)
- return vmap(pages, count, VM_MAP, PAGE_KERNEL);
-
- while (1) {
- void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
-
- /* retry two more times (totally 3 times) */
- if (addr || ++i >= 3)
- return addr;
- vm_unmap_aliases();
- }
- return NULL;
-}
-
-static void erofs_vunmap(const void *mem, unsigned int count)
-{
- if (!use_vmap)
- vm_unmap_ram(mem, count);
- else
- vunmap(mem);
-}
-
-static int decompress_generic(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const struct z_erofs_decompressor *alg = decompressors + rq->alg;
- unsigned int dst_maptype;
- void *dst;
- int ret;
-
- if (nrpages_out == 1 && !rq->inplace_io) {
- DBG_BUGON(!*rq->out);
- dst = kmap_atomic(*rq->out);
- dst_maptype = 0;
- goto dstmap_out;
- }
-
- /*
- * For small output sizes (especially much less than PAGE_SIZE),
- * copying the decompressed data rather than the compressed data
- * is preferred.
- */
- if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
- dst = erofs_get_pcpubuf(0);
- if (IS_ERR(dst))
- return PTR_ERR(dst);
-
- rq->inplace_io = false;
- ret = alg->decompress(rq, dst);
- if (!ret)
- copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
- rq->outputsize);
-
- erofs_put_pcpubuf(dst);
- return ret;
- }
-
- ret = alg->prepare_destpages(rq, pagepool);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- dst = page_address(*rq->out);
- dst_maptype = 1;
- goto dstmap_out;
- }
-
- dst = erofs_vmap(rq->out, nrpages_out);
- if (!dst)
- return -ENOMEM;
- dst_maptype = 2;
-
-dstmap_out:
- ret = alg->decompress(rq, dst + rq->pageofs_out);
-
- if (!dst_maptype)
- kunmap_atomic(dst);
- else if (dst_maptype == 2)
- erofs_vunmap(dst, nrpages_out);
- return ret;
-}
-
-static int shifted_decompress(const struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
- unsigned char *src, *dst;
-
- if (nrpages_out > 2) {
- DBG_BUGON(1);
- return -EIO;
- }
-
- if (rq->out[0] == *rq->in) {
- DBG_BUGON(nrpages_out != 1);
- return 0;
- }
-
- src = kmap_atomic(*rq->in);
- if (!rq->out[0]) {
- dst = NULL;
- } else {
- dst = kmap_atomic(rq->out[0]);
- memcpy(dst + rq->pageofs_out, src, righthalf);
- }
-
- if (rq->out[1] == *rq->in) {
- memmove(src, src + righthalf, rq->pageofs_out);
- } else if (nrpages_out == 2) {
- if (dst)
- kunmap_atomic(dst);
- DBG_BUGON(!rq->out[1]);
- dst = kmap_atomic(rq->out[1]);
- memcpy(dst, src + righthalf, rq->pageofs_out);
- }
- if (dst)
- kunmap_atomic(dst);
- kunmap_atomic(src);
- return 0;
-}
-
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
- return shifted_decompress(rq, pagepool);
- return decompress_generic(rq, pagepool);
-}
-
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
deleted file mode 100644
index 01efc96e1212..000000000000
--- a/drivers/staging/erofs/dir.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/dir.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "internal.h"
-
-static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
- [EROFS_FT_UNKNOWN] = DT_UNKNOWN,
- [EROFS_FT_REG_FILE] = DT_REG,
- [EROFS_FT_DIR] = DT_DIR,
- [EROFS_FT_CHRDEV] = DT_CHR,
- [EROFS_FT_BLKDEV] = DT_BLK,
- [EROFS_FT_FIFO] = DT_FIFO,
- [EROFS_FT_SOCK] = DT_SOCK,
- [EROFS_FT_SYMLINK] = DT_LNK,
-};
-
-static void debug_one_dentry(unsigned char d_type, const char *de_name,
- unsigned int de_namelen)
-{
-#ifdef CONFIG_EROFS_FS_DEBUG
- /* the on-disk name may not have a trailing '\0' */
- unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
-
- memcpy(dbg_namebuf, de_name, de_namelen);
- dbg_namebuf[de_namelen] = '\0';
-
- debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
- de_namelen, d_type);
-#endif
-}
-
-static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
- void *dentry_blk, unsigned int *ofs,
- unsigned int nameoff, unsigned int maxsize)
-{
- struct erofs_dirent *de = dentry_blk + *ofs;
- const struct erofs_dirent *end = dentry_blk + nameoff;
-
- while (de < end) {
- const char *de_name;
- unsigned int de_namelen;
- unsigned char d_type;
-
- if (de->file_type < EROFS_FT_MAX)
- d_type = erofs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
-
- nameoff = le16_to_cpu(de->nameoff);
- de_name = (char *)dentry_blk + nameoff;
-
- /* the last dirent in the block? */
- if (de + 1 >= end)
- de_namelen = strnlen(de_name, maxsize - nameoff);
- else
- de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
-
- /* a corrupted entry is found */
- if (unlikely(nameoff + de_namelen > maxsize ||
- de_namelen > EROFS_NAME_LEN)) {
- errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- debug_one_dentry(d_type, de_name, de_namelen);
- if (!dir_emit(ctx, de_name, de_namelen,
- le64_to_cpu(de->nid), d_type))
- /* stopped for some reason */
- return 1;
- ++de;
- *ofs += sizeof(struct erofs_dirent);
- }
- *ofs = maxsize;
- return 0;
-}
-
-static int erofs_readdir(struct file *f, struct dir_context *ctx)
-{
- struct inode *dir = file_inode(f);
- struct address_space *mapping = dir->i_mapping;
- const size_t dirsize = i_size_read(dir);
- unsigned int i = ctx->pos / EROFS_BLKSIZ;
- unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
- int err = 0;
- bool initial = true;
-
- while (ctx->pos < dirsize) {
- struct page *dentry_page;
- struct erofs_dirent *de;
- unsigned int nameoff, maxsize;
-
- dentry_page = read_mapping_page(mapping, i, NULL);
- if (IS_ERR(dentry_page))
- continue;
-
- de = (struct erofs_dirent *)kmap(dentry_page);
-
- nameoff = le16_to_cpu(de->nameoff);
-
- if (unlikely(nameoff < sizeof(struct erofs_dirent) ||
- nameoff >= PAGE_SIZE)) {
- errln("%s, invalid de[0].nameoff %u @ nid %llu",
- __func__, nameoff, EROFS_V(dir)->nid);
- err = -EFSCORRUPTED;
- goto skip_this;
- }
-
- maxsize = min_t(unsigned int,
- dirsize - ctx->pos + ofs, PAGE_SIZE);
-
- /* search dirents at the arbitrary position */
- if (unlikely(initial)) {
- initial = false;
-
- ofs = roundup(ofs, sizeof(struct erofs_dirent));
- if (unlikely(ofs >= nameoff))
- goto skip_this;
- }
-
- err = erofs_fill_dentries(dir, ctx, de, &ofs,
- nameoff, maxsize);
-skip_this:
- kunmap(dentry_page);
-
- put_page(dentry_page);
-
- ctx->pos = blknr_to_addr(i) + ofs;
-
- if (unlikely(err))
- break;
- ++i;
- ofs = 0;
- }
- return err < 0 ? err : 0;
-}
-
-const struct file_operations erofs_dir_fops = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate_shared = erofs_readdir,
-};
-
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
deleted file mode 100644
index 8dc2a75e478f..000000000000
--- a/drivers/staging/erofs/erofs_fs.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
-/*
- * linux/drivers/staging/erofs/erofs_fs.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_H
-#define __EROFS_FS_H
-
-/* Enhanced(Extended) ROM File System */
-#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
-#define EROFS_SUPER_OFFSET 1024
-
-/*
- * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be
- * incompatible with this kernel version.
- */
-#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001
-#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING
-
-struct erofs_super_block {
-/* 0 */__le32 magic; /* in the little endian */
-/* 4 */__le32 checksum; /* crc32c(super_block) */
-/* 8 */__le32 features; /* (aka. feature_compat) */
-/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
-/* 13 */__u8 reserved;
-
-/* 14 */__le16 root_nid;
-/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */
-
-/* 24 */__le64 build_time; /* inode v1 time derivation */
-/* 32 */__le32 build_time_nsec;
-/* 36 */__le32 blocks; /* used for statfs */
-/* 40 */__le32 meta_blkaddr;
-/* 44 */__le32 xattr_blkaddr;
-/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
-/* 64 */__u8 volume_name[16]; /* volume name */
-/* 80 */__le32 requirements; /* (aka. feature_incompat) */
-
-/* 84 */__u8 reserved2[44];
-} __packed; /* 128 bytes */
-
-/*
- * erofs inode data mapping:
- * 0 - inode plain without inline data A:
- * inode, [xattrs], ... | ... | no-holed data
- * 1 - inode VLE compression B (legacy):
- * inode, [xattrs], extents ... | ...
- * 2 - inode plain with inline data C:
- * inode, [xattrs], last_inline_data, ... | ... | no-holed data
- * 3 - inode compression D:
- * inode, [xattrs], map_header, extents ... | ...
- * 4~7 - reserved
- */
-enum {
- EROFS_INODE_FLAT_PLAIN,
- EROFS_INODE_FLAT_COMPRESSION_LEGACY,
- EROFS_INODE_FLAT_INLINE,
- EROFS_INODE_FLAT_COMPRESSION,
- EROFS_INODE_LAYOUT_MAX
-};
-
-static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
-{
- if (datamode == EROFS_INODE_FLAT_COMPRESSION)
- return true;
- return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
-}
-
-/* bit definitions of inode i_advise */
-#define EROFS_I_VERSION_BITS 1
-#define EROFS_I_DATA_MAPPING_BITS 3
-
-#define EROFS_I_VERSION_BIT 0
-#define EROFS_I_DATA_MAPPING_BIT 1
-
-struct erofs_inode_v1 {
-/* 0 */__le16 i_advise;
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
-/* 2 */__le16 i_xattr_icount;
-/* 4 */__le16 i_mode;
-/* 6 */__le16 i_nlink;
-/* 8 */__le32 i_size;
-/* 12 */__le32 i_reserved;
-/* 16 */union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
- } i_u __packed;
-/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */
-/* 24 */__le16 i_uid;
-/* 26 */__le16 i_gid;
-/* 28 */__le32 i_reserved2;
-} __packed;
-
-/* 32 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_V1 0
-/* 64 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_V2 1
-
-struct erofs_inode_v2 {
-/* 0 */__le16 i_advise;
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
-/* 2 */__le16 i_xattr_icount;
-/* 4 */__le16 i_mode;
-/* 6 */__le16 i_reserved;
-/* 8 */__le64 i_size;
-/* 16 */union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
- } i_u __packed;
-
- /* only used for 32-bit stat compatibility */
-/* 20 */__le32 i_ino;
-
-/* 24 */__le32 i_uid;
-/* 28 */__le32 i_gid;
-/* 32 */__le64 i_ctime;
-/* 40 */__le32 i_ctime_nsec;
-/* 44 */__le32 i_nlink;
-/* 48 */__u8 i_reserved2[16];
-} __packed; /* 64 bytes */
-
-#define EROFS_MAX_SHARED_XATTRS (128)
-/* h_shared_count between 129 ... 255 are special # */
-#define EROFS_SHARED_XATTR_EXTENT (255)
-
-/*
- * inline xattrs (n == i_xattr_icount):
- * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
- * 12 bytes / \
- * / \
- * /-----------------------\
- * | erofs_xattr_entries+ |
- * +-----------------------+
- * inline xattrs must start in erofs_xattr_ibody_header;
- * for a read-only fs, there is no need to introduce h_refcount
- */
-struct erofs_xattr_ibody_header {
- __le32 h_reserved;
- __u8 h_shared_count;
- __u8 h_reserved2[7];
- __le32 h_shared_xattrs[0]; /* shared xattr id array */
-} __packed;
-
-/* Name indexes */
-#define EROFS_XATTR_INDEX_USER 1
-#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2
-#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
-#define EROFS_XATTR_INDEX_TRUSTED 4
-#define EROFS_XATTR_INDEX_LUSTRE 5
-#define EROFS_XATTR_INDEX_SECURITY 6
-
-/* xattr entry (for both inline & shared xattrs) */
-struct erofs_xattr_entry {
- __u8 e_name_len; /* length of name */
- __u8 e_name_index; /* attribute name index */
- __le16 e_value_size; /* size of attribute value */
- /* followed by e_name and e_value */
- char e_name[0]; /* attribute name */
-} __packed;
-
-#define ondisk_xattr_ibody_size(count) ({\
- u32 __count = le16_to_cpu(count); \
- ((__count) == 0) ? 0 : \
- sizeof(struct erofs_xattr_ibody_header) + \
- sizeof(__u32) * ((__count) - 1); })
-
-#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry))
-#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \
- sizeof(struct erofs_xattr_entry) + \
- (entry)->e_name_len + le16_to_cpu((entry)->e_value_size))
-
-/* available compression algorithm types */
-enum {
- Z_EROFS_COMPRESSION_LZ4,
- Z_EROFS_COMPRESSION_MAX
-};
-
-/*
- * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
- * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
- * (4B) + 2B + (4B) if compacted 2B is on.
- */
-#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0
-
-#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT)
-
-struct z_erofs_map_header {
- __le32 h_reserved1;
- __le16 h_advise;
- /*
- * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
- * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
- */
- __u8 h_algorithmtype;
- /*
- * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
- * bit 3-4 : (physical - logical) cluster bits of head 1:
- * For example, if logical clustersize = 4096, 1 for 8192.
- * bit 5-7 : (physical - logical) cluster bits of head 2.
- */
- __u8 h_clusterbits;
-};
-
-#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
-
-/*
- * Z_EROFS Variable-sized Logical Extent cluster type:
- * 0 - literal (uncompressed) cluster
- * 1 - compressed cluster (for the head logical cluster)
- * 2 - compressed cluster (for the other logical clusters)
- *
- * In detail,
- * 0 - literal (uncompressed) cluster,
- * di_advise = 0
- * di_clusterofs = the literal data offset of the cluster
- * di_blkaddr = the blkaddr of the literal cluster
- *
- * 1 - compressed cluster (for the head logical cluster)
- * di_advise = 1
- * di_clusterofs = the decompressed data offset of the cluster
- * di_blkaddr = the blkaddr of the compressed cluster
- *
- * 2 - compressed cluster (for the other logical clusters)
- * di_advise = 2
- * di_clusterofs =
- * the decompressed data offset in its own head cluster
- * di_u.delta[0] = distance to its corresponding head cluster
- * di_u.delta[1] = distance to its corresponding tail cluster
- * (di_advise could be 0, 1 or 2)
- */
-enum {
- Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
- Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
- Z_EROFS_VLE_CLUSTER_TYPE_MAX
-};
-
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
-
-struct z_erofs_vle_decompressed_index {
- __le16 di_advise;
- /* where to decompress in the head cluster */
- __le16 di_clusterofs;
-
- union {
- /* for the head cluster */
- __le32 blkaddr;
- /*
- * for the rest of the clusters
- * e.g. for a 4k page-sized cluster, maximum 4K*64k = 256M
- * [0] - pointing to the head cluster
- * [1] - pointing to the tail cluster
- */
- __le16 delta[2];
- } di_u __packed; /* 8 bytes */
-} __packed;
-
-#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
- (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
- sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
-
-/* dirent sorts in alphabet order, thus we can do binary search */
-struct erofs_dirent {
- __le64 nid; /* 0, node number */
- __le16 nameoff; /* 8, start offset of file name */
- __u8 file_type; /* 10, file type */
- __u8 reserved; /* 11, reserved */
-} __packed;
-
-/* file types used in inode_info->flags */
-enum {
- EROFS_FT_UNKNOWN,
- EROFS_FT_REG_FILE,
- EROFS_FT_DIR,
- EROFS_FT_CHRDEV,
- EROFS_FT_BLKDEV,
- EROFS_FT_FIFO,
- EROFS_FT_SOCK,
- EROFS_FT_SYMLINK,
- EROFS_FT_MAX
-};
-
-#define EROFS_NAME_LEN 255
-
-/* check the EROFS on-disk layout strictly at compile time */
-static inline void erofs_check_ondisk_layout_definitions(void)
-{
- BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
- BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32);
- BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64);
- BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
- BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
- BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
- BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
- BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
-
- BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
- Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
-}
-
-#endif
-
diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h
deleted file mode 100644
index bfb2da9c4eee..000000000000
--- a/drivers/staging/erofs/include/trace/events/erofs.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM erofs
-
-#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EROFS_H
-
-#include <linux/tracepoint.h>
-
-#define show_dev(dev) MAJOR(dev), MINOR(dev)
-#define show_dev_nid(entry) show_dev(entry->dev), entry->nid
-
-#define show_file_type(type) \
- __print_symbolic(type, \
- { 0, "FILE" }, \
- { 1, "DIR" })
-
-#define show_map_flags(flags) __print_flags(flags, "|", \
- { EROFS_GET_BLOCKS_RAW, "RAW" })
-
-#define show_mflags(flags) __print_flags(flags, "", \
- { EROFS_MAP_MAPPED, "M" }, \
- { EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ZIPPED, "Z" })
-
-TRACE_EVENT(erofs_lookup,
-
- TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
-
- TP_ARGS(dir, dentry, flags),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(const char *, name )
- __field(unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = dir->i_sb->s_dev;
- __entry->nid = EROFS_V(dir)->nid;
- __entry->name = dentry->d_name.name;
- __entry->flags = flags;
- ),
-
- TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
- show_dev_nid(__entry),
- __entry->name,
- __entry->flags)
-);
-
-TRACE_EVENT(erofs_fill_inode,
- TP_PROTO(struct inode *inode, int isdir),
- TP_ARGS(inode, isdir),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(erofs_blk_t, blkaddr )
- __field(unsigned int, ofs )
- __field(int, isdir )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->isdir = isdir;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
- show_dev_nid(__entry),
- __entry->blkaddr, __entry->ofs,
- __entry->isdir)
-);
-
-TRACE_EVENT(erofs_readpage,
-
- TP_PROTO(struct page *page, bool raw),
-
- TP_ARGS(page, raw),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(int, dir )
- __field(pgoff_t, index )
- __field(int, uptodate)
- __field(bool, raw )
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_V(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
- __entry->raw = raw;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d "
- "raw = %d",
- show_dev_nid(__entry),
- show_file_type(__entry->dir),
- (unsigned long)__entry->index,
- __entry->uptodate,
- __entry->raw)
-);
-
-TRACE_EVENT(erofs_readpages,
-
- TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage,
- bool raw),
-
- TP_ARGS(inode, page, nrpage, raw),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(pgoff_t, start )
- __field(unsigned int, nrpage )
- __field(bool, raw )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->start = page->index;
- __entry->nrpage = nrpage;
- __entry->raw = raw;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d",
- show_dev_nid(__entry),
- (unsigned long)__entry->start,
- __entry->nrpage,
- __entry->raw)
-);
-
-DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- __field( erofs_off_t, la )
- __field( u64, llen )
- __field( unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->la = map->m_la;
- __entry->llen = map->m_llen;
- __entry->flags = flags;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s",
- show_dev_nid(__entry),
- __entry->la, __entry->llen,
- __entry->flags ? show_map_flags(__entry->flags) : "NULL")
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- __field( unsigned int, flags )
- __field( erofs_off_t, la )
- __field( erofs_off_t, pa )
- __field( u64, llen )
- __field( u64, plen )
- __field( unsigned int, mflags )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->flags = flags;
- __entry->la = map->m_la;
- __entry->pa = map->m_pa;
- __entry->llen = map->m_llen;
- __entry->plen = map->m_plen;
- __entry->mflags = map->m_flags;
- __entry->ret = ret;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, flags %s "
- "la %llu pa %llu llen %llu plen %llu mflags %s ret %d",
- show_dev_nid(__entry),
- __entry->flags ? show_map_flags(__entry->flags) : "NULL",
- __entry->la, __entry->pa, __entry->llen, __entry->plen,
- show_mflags(__entry->mflags), __entry->ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-TRACE_EVENT(erofs_destroy_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
-);
-
-#endif /* _TRACE_EROFS_H */
-
- /* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
deleted file mode 100644
index cbc2c342a37f..000000000000
--- a/drivers/staging/erofs/inode.c
+++ /dev/null
@@ -1,334 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/inode.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "xattr.h"
-
-#include <trace/events/erofs.h>
-
-/* no locking */
-static int read_inode(struct inode *inode, void *data)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
- struct erofs_inode_v1 *v1 = data;
- const unsigned int advise = le16_to_cpu(v1->i_advise);
- erofs_blk_t nblks = 0;
-
- vi->datamode = __inode_data_mapping(advise);
-
- if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) {
- errln("unsupported data mapping %u of nid %llu",
- vi->datamode, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
- }
-
- if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
- struct erofs_inode_v2 *v2 = data;
-
- vi->inode_isize = sizeof(struct erofs_inode_v2);
- vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
-
- inode->i_mode = le16_to_cpu(v2->i_mode);
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
- else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(v2->i_u.rdev));
- else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode))
- inode->i_rdev = 0;
- else
- goto bogusimode;
-
- i_uid_write(inode, le32_to_cpu(v2->i_uid));
- i_gid_write(inode, le32_to_cpu(v2->i_gid));
- set_nlink(inode, le32_to_cpu(v2->i_nlink));
-
- /* ns timestamp */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- le64_to_cpu(v2->i_ctime);
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- le32_to_cpu(v2->i_ctime_nsec);
-
- inode->i_size = le64_to_cpu(v2->i_size);
-
- /* total blocks for compressed files */
- if (is_inode_layout_compression(inode))
- nblks = le32_to_cpu(v2->i_u.compressed_blocks);
- } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
- vi->inode_isize = sizeof(struct erofs_inode_v1);
- vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
-
- inode->i_mode = le16_to_cpu(v1->i_mode);
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
- else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(v1->i_u.rdev));
- else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode))
- inode->i_rdev = 0;
- else
- goto bogusimode;
-
- i_uid_write(inode, le16_to_cpu(v1->i_uid));
- i_gid_write(inode, le16_to_cpu(v1->i_gid));
- set_nlink(inode, le16_to_cpu(v1->i_nlink));
-
- /* use build time to derive all file time */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- sbi->build_time;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- sbi->build_time_nsec;
-
- inode->i_size = le32_to_cpu(v1->i_size);
- if (is_inode_layout_compression(inode))
- nblks = le32_to_cpu(v1->i_u.compressed_blocks);
- } else {
- errln("unsupported on-disk inode version %u of nid %llu",
- __inode_version(advise), vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
- }
-
- if (!nblks)
- /* measure inode.i_blocks as generic filesystems */
- inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
- else
- inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
- return 0;
-
-bogusimode:
- errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
-}
-
-/*
- * try_lock can be required since the locking order is:
- * file data(fs_inode)
- * meta(bd_inode)
- * but most of the callers are "iget";
- * in that case we are pretty sure there is no deadlock since
- * no data operations exist. However, I tend to use
- * try_lock since it takes little overhead and
- * will succeed immediately.
- */
-static int fill_inline_data(struct inode *inode, void *data,
- unsigned int m_pofs)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
- struct erofs_sb_info *sbi = EROFS_I_SB(inode);
-
-	/* should be an inline-layout (FLAT_INLINE) inode */
- if (!is_inode_flat_inline(inode))
- return 0;
-
- /* fast symlink (following ext4) */
- if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
- char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
-
- if (unlikely(!lnk))
- return -ENOMEM;
-
- m_pofs += vi->inode_isize + vi->xattr_isize;
-
-		/* inline symlink data shouldn't cross a page boundary either */
- if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
- kfree(lnk);
- errln("inline data cross block boundary @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- /* get in-page inline data */
- memcpy(lnk, data + m_pofs, inode->i_size);
- lnk[inode->i_size] = '\0';
-
- inode->i_link = lnk;
- set_inode_fast_symlink(inode);
- }
- return 0;
-}
-
-static int fill_inode(struct inode *inode, int isdir)
-{
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
- struct erofs_vnode *vi = EROFS_V(inode);
- struct page *page;
- void *data;
- int err;
- erofs_blk_t blkaddr;
- unsigned int ofs;
- erofs_off_t inode_loc;
-
- trace_erofs_fill_inode(inode, isdir);
- inode_loc = iloc(sbi, vi->nid);
- blkaddr = erofs_blknr(inode_loc);
- ofs = erofs_blkoff(inode_loc);
-
- debugln("%s, reading inode nid %llu at %u of blkaddr %u",
- __func__, vi->nid, ofs, blkaddr);
-
- page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);
-
- if (IS_ERR(page)) {
- errln("failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(page));
- return PTR_ERR(page);
- }
-
- DBG_BUGON(!PageUptodate(page));
- data = page_address(page);
-
- err = read_inode(inode, data + ofs);
- if (!err) {
- /* setup the new inode */
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &erofs_generic_iops;
- inode->i_fop = &generic_ro_fops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &erofs_dir_iops;
- inode->i_fop = &erofs_dir_fops;
- } else if (S_ISLNK(inode->i_mode)) {
- /* by default, page_get_link is used for symlink */
- inode->i_op = &erofs_symlink_iops;
- inode_nohighmem(inode);
- } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
- S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- inode->i_op = &erofs_generic_iops;
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
- goto out_unlock;
- } else {
- err = -EFSCORRUPTED;
- goto out_unlock;
- }
-
- if (is_inode_layout_compression(inode)) {
- err = z_erofs_fill_inode(inode);
- goto out_unlock;
- }
-
- inode->i_mapping->a_ops = &erofs_raw_access_aops;
-
- /* fill last page if inline data is available */
- err = fill_inline_data(inode, data, ofs);
- }
-
-out_unlock:
- unlock_page(page);
- put_page(page);
- return err;
-}
-
-/*
- * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
- * we should do more for 32-bit platform to find the right inode.
- */
-#if BITS_PER_LONG == 32
-static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
-{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
-
- return EROFS_V(inode)->nid == nid;
-}
-
-static int erofs_iget_set_actor(struct inode *inode, void *opaque)
-{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
-
- inode->i_ino = erofs_inode_hash(nid);
- return 0;
-}
-#endif
-
-static inline struct inode *erofs_iget_locked(struct super_block *sb,
- erofs_nid_t nid)
-{
- const unsigned long hashval = erofs_inode_hash(nid);
-
-#if BITS_PER_LONG >= 64
- /* it is safe to use iget_locked for >= 64-bit platform */
- return iget_locked(sb, hashval);
-#else
- return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
- erofs_iget_set_actor, &nid);
-#endif
-}
-
-struct inode *erofs_iget(struct super_block *sb,
- erofs_nid_t nid,
- bool isdir)
-{
- struct inode *inode = erofs_iget_locked(sb, nid);
-
- if (unlikely(!inode))
- return ERR_PTR(-ENOMEM);
-
- if (inode->i_state & I_NEW) {
- int err;
- struct erofs_vnode *vi = EROFS_V(inode);
-
- vi->nid = nid;
-
- err = fill_inode(inode, isdir);
- if (likely(!err))
- unlock_new_inode(inode);
- else {
- iget_failed(inode);
- inode = ERR_PTR(err);
- }
- }
- return inode;
-}
-
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
-{
- struct inode *const inode = d_inode(path->dentry);
-
- if (is_inode_layout_compression(inode))
- stat->attributes |= STATX_ATTR_COMPRESSED;
-
- stat->attributes |= STATX_ATTR_IMMUTABLE;
- stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
- STATX_ATTR_IMMUTABLE);
-
- generic_fillattr(inode, stat);
- return 0;
-}
-
-const struct inode_operations erofs_generic_iops = {
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
-const struct inode_operations erofs_symlink_iops = {
- .get_link = page_get_link,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
-const struct inode_operations erofs_fast_symlink_iops = {
- .get_link = simple_get_link,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
deleted file mode 100644
index 0e8d58546c52..000000000000
--- a/drivers/staging/erofs/internal.h
+++ /dev/null
@@ -1,554 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/staging/erofs/internal.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_INTERNAL_H
-#define __EROFS_INTERNAL_H
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bio.h>
-#include <linux/buffer_head.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "erofs_fs.h"
-
-/* redefine pr_fmt "erofs: " */
-#undef pr_fmt
-#define pr_fmt(fmt) "erofs: " fmt
-
-#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__)
-#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__)
-#ifdef CONFIG_EROFS_FS_DEBUG
-#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
-#define DBG_BUGON BUG_ON
-#else
-#define debugln(x, ...) ((void)0)
-#define DBG_BUGON(x) ((void)(x))
-#endif /* !CONFIG_EROFS_FS_DEBUG */
-
-enum {
- FAULT_KMALLOC,
- FAULT_READ_IO,
- FAULT_MAX,
-};
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-extern const char *erofs_fault_name[FAULT_MAX];
-#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
-
-struct erofs_fault_info {
- atomic_t inject_ops;
- unsigned int inject_rate;
- unsigned int inject_type;
-};
-#endif /* CONFIG_EROFS_FAULT_INJECTION */
-
-/* use EROFS_SUPER_MAGIC_V1 to represent the whole file system */
-#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
-
-typedef u64 erofs_nid_t;
-typedef u64 erofs_off_t;
-/* data type for filesystem-wide block numbers */
-typedef u32 erofs_blk_t;
-
-struct erofs_sb_info {
-#ifdef CONFIG_EROFS_FS_ZIP
- /* list for all registered superblocks, mainly for shrinker */
- struct list_head list;
- struct mutex umount_mutex;
-
- /* the dedicated workstation for compression */
- struct radix_tree_root workstn_tree;
-
-	/* threshold for synchronous decompression */
- unsigned int max_sync_decompress_pages;
-
- unsigned int shrinker_run_no;
-
- /* current strategy of how to use managed cache */
- unsigned char cache_strategy;
-
- /* pseudo inode to manage cached pages */
- struct inode *managed_cache;
-#endif /* CONFIG_EROFS_FS_ZIP */
- u32 blocks;
- u32 meta_blkaddr;
-#ifdef CONFIG_EROFS_FS_XATTR
- u32 xattr_blkaddr;
-#endif
-
- /* inode slot unit size in bit shift */
- unsigned char islotbits;
-
- u32 build_time_nsec;
- u64 build_time;
-
-	/* what we really care about is nid, rather than ino */
- erofs_nid_t root_nid;
- /* used for statfs, f_files - f_favail */
- u64 inos;
-
- u8 uuid[16]; /* 128-bit uuid for volume */
- u8 volume_name[16]; /* volume name */
- u32 requirements;
-
- unsigned int mount_opt;
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
- struct erofs_fault_info fault_info; /* For fault injection */
-#endif
-};
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-#define erofs_show_injection_info(type) \
- infoln("inject %s in %s of %pS", erofs_fault_name[type], \
- __func__, __builtin_return_address(0))
-
-static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
-{
- struct erofs_fault_info *ffi = &sbi->fault_info;
-
- if (!ffi->inject_rate)
- return false;
-
- if (!IS_FAULT_SET(ffi, type))
- return false;
-
- atomic_inc(&ffi->inject_ops);
- if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
- atomic_set(&ffi->inject_ops, 0);
- return true;
- }
- return false;
-}
-#else
-static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
-{
- return false;
-}
-
-static inline void erofs_show_injection_info(int type)
-{
-}
-#endif /* !CONFIG_EROFS_FAULT_INJECTION */
-
-static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
- size_t size, gfp_t flags)
-{
- if (time_to_inject(sbi, FAULT_KMALLOC)) {
- erofs_show_injection_info(FAULT_KMALLOC);
- return NULL;
- }
- return kmalloc(size, flags);
-}
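A short worked illustration of the fault-injection helpers above (not part of the diff itself): mounting with fault_injection=1000 makes __erofs_build_fault_attr() in super.c set inject_rate to 1000 and enable every fault type, so time_to_inject() lets 999 of every 1000 tracked calls proceed and trips on the 1000th; erofs_kmalloc() then returns NULL exactly as if kmalloc() itself had failed.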
-
-#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
-#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
-
-/* Mount flags set via mount options or defaults */
-#define EROFS_MOUNT_XATTR_USER 0x00000010
-#define EROFS_MOUNT_POSIX_ACL 0x00000020
-#define EROFS_MOUNT_FAULT_INJECTION 0x00000040
-
-#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
-#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
-#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
-
-#ifdef CONFIG_EROFS_FS_ZIP
-enum {
- EROFS_ZIP_CACHE_DISABLED,
- EROFS_ZIP_CACHE_READAHEAD,
- EROFS_ZIP_CACHE_READAROUND
-};
-
-#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-
-/* basic unit of the workstation of a super_block */
-struct erofs_workgroup {
- /* the workgroup index in the workstation */
- pgoff_t index;
-
- /* overall workgroup reference count */
- atomic_t refcount;
-};
-
-#if defined(CONFIG_SMP)
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- /*
- * other observers should notice all modifications
- * in the freezing period.
- */
- smp_mb();
- atomic_set(&grp->refcount, orig_val);
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- return atomic_cond_read_relaxed(&grp->refcount,
- VAL != EROFS_LOCKED_MAGIC);
-}
-#else
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- /* no need to spin on UP platforms, let's just disable preemption. */
- if (val != atomic_read(&grp->refcount)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- int v = atomic_read(&grp->refcount);
-
-	/* workgroup is never frozen on uniprocessor systems */
- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
- return v;
-}
-#endif /* !CONFIG_SMP */
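A minimal sketch of how the freeze helpers above are meant to be paired; the function name is made up, and the real caller in this patch is erofs_try_to_release_workgroup() in utils.c further below:

static bool example_reclaim(struct erofs_workgroup *grp)
{
	/* proceed only if we atomically own the last reference (== 1) */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/* ...exclusively tear down private state while others wait... */

	/* publish the final refcount; 0 here means the workgroup is gone */
	erofs_workgroup_unfreeze(grp, 0);
	return true;
}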
-
-/* hard limit of pages per compressed cluster */
-#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
-#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
-#else
-#define EROFS_PCPUBUF_NR_PAGES 0
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
-/* the block size strictly follows PAGE_SIZE; no buffer_head is used yet */
-#define LOG_BLOCK_SIZE PAGE_SHIFT
-
-#undef LOG_SECTORS_PER_BLOCK
-#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
-
-#undef SECTORS_PER_BLOCK
-#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)
-
-#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
-
-#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
-#error erofs cannot be used in this platform
-#endif
-
-#define EROFS_IO_MAX_RETRIES_NOFAIL 5
-
-#define ROOT_NID(sb) ((sb)->root_nid)
-
-#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
-#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
-#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
-
-static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
-{
- return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
-}
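To make the address arithmetic above concrete (numbers are illustrative): with 4KiB pages EROFS_BLKSIZ is 4096, and super.c below sets islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1, i.e. 5 for the 32-byte compact inode; so with meta_blkaddr = 2 and nid = 36, iloc() yields 2 * 4096 + 36 * 32 = 9344, which erofs_blknr()/erofs_blkoff() split into block 2 at offset 1152.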
-
-/* atomic flag definitions */
-#define EROFS_V_EA_INITED_BIT 0
-#define EROFS_V_Z_INITED_BIT 1
-
-/* bitlock definitions (arranged in reverse order) */
-#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
-#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2)
-
-struct erofs_vnode {
- erofs_nid_t nid;
-
- /* atomic flags (including bitlocks) */
- unsigned long flags;
-
- unsigned char datamode;
- unsigned char inode_isize;
- unsigned short xattr_isize;
-
- unsigned int xattr_shared_count;
- unsigned int *xattr_shared_xattrs;
-
- union {
- erofs_blk_t raw_blkaddr;
-#ifdef CONFIG_EROFS_FS_ZIP
- struct {
- unsigned short z_advise;
- unsigned char z_algorithmtype[2];
- unsigned char z_logical_clusterbits;
- unsigned char z_physical_clusterbits[2];
- };
-#endif /* CONFIG_EROFS_FS_ZIP */
- };
- /* the corresponding vfs inode */
- struct inode vfs_inode;
-};
-
-#define EROFS_V(ptr) \
- container_of(ptr, struct erofs_vnode, vfs_inode)
-
-#define __inode_advise(x, bit, bits) \
- (((x) >> (bit)) & ((1 << (bits)) - 1))
-
-#define __inode_version(advise) \
- __inode_advise(advise, EROFS_I_VERSION_BIT, \
- EROFS_I_VERSION_BITS)
-
-#define __inode_data_mapping(advise) \
- __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
- EROFS_I_DATA_MAPPING_BITS)
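A small sketch of how read_inode() in inode.c above consumes these macros. The concrete bit positions come from erofs_fs.h and are only assumed here (version in bit 0, data mapping in bits 1..3), so the decoded values are illustrative:

static void example_decode_advise(void)
{
	const unsigned int advise = 0x5;	/* hypothetical on-disk i_advise */

	/* assuming version = bit 0 and data mapping = bits 1..3 */
	unsigned int version = __inode_version(advise);		/* -> 1 (v2) */
	unsigned int datamode = __inode_data_mapping(advise);	/* -> 2 (FLAT_INLINE) */

	(void)version;
	(void)datamode;
}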
-
-static inline unsigned long inode_datablocks(struct inode *inode)
-{
- /* since i_size cannot be changed */
- return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
-}
-
-static inline bool is_inode_layout_compression(struct inode *inode)
-{
- return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode);
-}
-
-static inline bool is_inode_flat_inline(struct inode *inode)
-{
- return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE;
-}
-
-extern const struct super_operations erofs_sops;
-
-extern const struct address_space_operations erofs_raw_access_aops;
-#ifdef CONFIG_EROFS_FS_ZIP
-extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
-#endif
-
-/*
- * Logical to physical block mapping, used by erofs_map_blocks()
- *
- * Unlike other file systems, it is used for 2 access modes:
- *
- * 1) RAW access mode:
- *
- * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
- * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
- *
- * Note that m_lblk in the RAW access mode refers to the number of
- * the compressed ondisk block rather than the uncompressed
- * in-memory block for the compressed file.
- *
- * m_pofs equals m_lofs except for the inline data page.
- *
- * 2) Normal access mode:
- *
- * If the inode is not compressed, it has no difference with
- * the RAW access mode. However, if the inode is compressed,
- * users should pass a valid (m_lblk, m_lofs) pair, and get
- * the needed m_pblk, m_pofs, m_len to get the compressed data
- * and the updated m_lblk, m_lofs which indicates the start
- * of the corresponding uncompressed data in the file.
- */
-enum {
- BH_Zipped = BH_PrivateStart,
- BH_FullMapped,
-};
-
-/* Has a disk mapping */
-#define EROFS_MAP_MAPPED (1 << BH_Mapped)
-/* Located in metadata (could be copied from bd_inode) */
-#define EROFS_MAP_META (1 << BH_Meta)
-/* The extent has been compressed */
-#define EROFS_MAP_ZIPPED (1 << BH_Zipped)
-/* The extent length is fully mapped */
-#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
-
-struct erofs_map_blocks {
- erofs_off_t m_pa, m_la;
- u64 m_plen, m_llen;
-
- unsigned int m_flags;
-
- struct page *mpage;
-};
-
-/* Flags used by erofs_map_blocks() */
-#define EROFS_GET_BLOCKS_RAW 0x0001
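A hedged sketch of driving erofs_map_blocks() (declared further down in this header) in RAW mode; the helper name is invented for illustration and the error handling is kept minimal:

/* look up the on-disk extent backing byte offset @pos of @inode */
static int example_raw_lookup(struct inode *inode, erofs_off_t pos,
			      erofs_off_t *pa, u64 *plen)
{
	struct erofs_map_blocks map = { .m_la = pos };
	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);

	if (err)
		return err;
	if (!(map.m_flags & EROFS_MAP_MAPPED))
		return -EIO;	/* no backing extent for this offset */

	*pa = map.m_pa;		/* physical byte address on disk */
	*plen = map.m_plen;	/* mapped length in bytes */
	return 0;
}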
-
-/* zmap.c */
-#ifdef CONFIG_EROFS_FS_ZIP
-int z_erofs_fill_inode(struct inode *inode);
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags);
-#else
-static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
-static inline int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- return -EOPNOTSUPP;
-}
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
-/* data.c */
-static inline struct bio *erofs_grab_bio(struct super_block *sb,
- erofs_blk_t blkaddr,
- unsigned int nr_pages,
- void *bi_private, bio_end_io_t endio,
- bool nofail)
-{
- const gfp_t gfp = GFP_NOIO;
- struct bio *bio;
-
- do {
- if (nr_pages == 1) {
- bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
- if (unlikely(!bio)) {
- DBG_BUGON(nofail);
- return ERR_PTR(-ENOMEM);
- }
- break;
- }
- bio = bio_alloc(gfp, nr_pages);
- nr_pages /= 2;
- } while (unlikely(!bio));
-
- bio->bi_end_io = endio;
- bio_set_dev(bio, sb->s_bdev);
- bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
- bio->bi_private = bi_private;
- return bio;
-}
-
-static inline void __submit_bio(struct bio *bio, unsigned int op,
- unsigned int op_flags)
-{
- bio_set_op_attrs(bio, op, op_flags);
- submit_bio(bio);
-}
-
-struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
- bool prio, bool nofail);
-
-static inline struct page *erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio)
-{
- return __erofs_get_meta_page(sb, blkaddr, prio, false);
-}
-
-int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
-
-static inline struct page *erofs_get_inline_page(struct inode *inode,
- erofs_blk_t blkaddr)
-{
- return erofs_get_meta_page(inode->i_sb, blkaddr,
- S_ISDIR(inode->i_mode));
-}
-
-/* inode.c */
-static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
-{
-#if BITS_PER_LONG == 32
- return (nid >> 32) ^ (nid & 0xffffffff);
-#else
- return nid;
-#endif
-}
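For example, on a 32-bit build a nid of 0x0000000500000002 hashes to 5 ^ 2 = 7, whereas a 64-bit build uses the nid as-is; this folding is also why the 32-bit path needs the ilookup test/set actors in inode.c above.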
-
-extern const struct inode_operations erofs_generic_iops;
-extern const struct inode_operations erofs_symlink_iops;
-extern const struct inode_operations erofs_fast_symlink_iops;
-
-static inline void set_inode_fast_symlink(struct inode *inode)
-{
- inode->i_op = &erofs_fast_symlink_iops;
-}
-
-static inline bool is_inode_fast_symlink(struct inode *inode)
-{
- return inode->i_op == &erofs_fast_symlink_iops;
-}
-
-struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags);
-
-/* namei.c */
-extern const struct inode_operations erofs_dir_iops;
-
-int erofs_namei(struct inode *dir, struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type);
-
-/* dir.c */
-extern const struct file_operations erofs_dir_fops;
-
-/* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
-
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-void *erofs_get_pcpubuf(unsigned int pagenr);
-#define erofs_put_pcpubuf(buf) do { \
- (void)&(buf); \
- preempt_enable(); \
-} while (0)
-#else
-static inline void *erofs_get_pcpubuf(unsigned int pagenr)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-#define erofs_put_pcpubuf(buf) do {} while (0)
-#endif
-
-#ifdef CONFIG_EROFS_FS_ZIP
-int erofs_workgroup_put(struct erofs_workgroup *grp);
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag);
-int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp, bool tag);
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
-void erofs_shrinker_register(struct super_block *sb);
-void erofs_shrinker_unregister(struct super_block *sb);
-int __init erofs_init_shrinker(void);
-void erofs_exit_shrinker(void);
-int __init z_erofs_init_zip_subsystem(void);
-void z_erofs_exit_zip_subsystem(void);
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct address_space *mapping,
- struct page *page);
-#else
-static inline void erofs_shrinker_register(struct super_block *sb) {}
-static inline void erofs_shrinker_unregister(struct super_block *sb) {}
-static inline int erofs_init_shrinker(void) { return 0; }
-static inline void erofs_exit_shrinker(void) {}
-static inline int z_erofs_init_zip_subsystem(void) { return 0; }
-static inline void z_erofs_exit_zip_subsystem(void) {}
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
-#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
-
-#endif /* __EROFS_INTERNAL_H */
-
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
deleted file mode 100644
index c0963f5a2d22..000000000000
--- a/drivers/staging/erofs/namei.c
+++ /dev/null
@@ -1,253 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/namei.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "xattr.h"
-
-#include <trace/events/erofs.h>
-
-struct erofs_qstr {
- const unsigned char *name;
- const unsigned char *end;
-};
-
-/* this relies on qn->end being accurate and on qn having a trailing '\0' */
-static inline int dirnamecmp(const struct erofs_qstr *qn,
- const struct erofs_qstr *qd,
- unsigned int *matched)
-{
- unsigned int i = *matched;
-
-	/*
-	 * on-disk error: only BUG_ON in debugging mode;
-	 * otherwise, return 1 to just skip the invalid name
-	 * and go on (in consideration of the lookup performance).
-	 */
- DBG_BUGON(qd->name > qd->end);
-
-	/* qd may not have a trailing '\0' */
-	/* however, it is absolutely safe to access bytes below qd->end */
- while (qd->name + i < qd->end && qd->name[i] != '\0') {
- if (qn->name[i] != qd->name[i]) {
- *matched = i;
- return qn->name[i] > qd->name[i] ? 1 : -1;
- }
- ++i;
- }
- *matched = i;
- /* See comments in __d_alloc on the terminating NUL character */
- return qn->name[i] == '\0' ? 0 : 1;
-}
-
-#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
-
-static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
- u8 *data,
- unsigned int dirblksize,
- const int ndirents)
-{
- int head, back;
- unsigned int startprfx, endprfx;
- struct erofs_dirent *const de = (struct erofs_dirent *)data;
-
- /* since the 1st dirent has been evaluated previously */
- head = 1;
- back = ndirents - 1;
- startprfx = endprfx = 0;
-
- while (head <= back) {
- const int mid = head + (back - head) / 2;
- const int nameoff = nameoff_from_disk(de[mid].nameoff,
- dirblksize);
- unsigned int matched = min(startprfx, endprfx);
- struct erofs_qstr dname = {
- .name = data + nameoff,
- .end = unlikely(mid >= ndirents - 1) ?
- data + dirblksize :
- data + nameoff_from_disk(de[mid + 1].nameoff,
- dirblksize)
- };
-
- /* string comparison without already matched prefix */
- int ret = dirnamecmp(name, &dname, &matched);
-
- if (unlikely(!ret)) {
- return de + mid;
- } else if (ret > 0) {
- head = mid + 1;
- startprfx = matched;
- } else {
- back = mid - 1;
- endprfx = matched;
- }
- }
-
- return ERR_PTR(-ENOENT);
-}
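As an example of the shared-prefix optimization above: once the lower and upper probes both start with "abc", startprfx and endprfx are at least 3, so each subsequent dirnamecmp() call resumes matching at the fourth byte instead of re-comparing the common prefix.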
-
-static struct page *find_target_block_classic(struct inode *dir,
- struct erofs_qstr *name,
- int *_ndirents)
-{
- unsigned int startprfx, endprfx;
- int head, back;
- struct address_space *const mapping = dir->i_mapping;
- struct page *candidate = ERR_PTR(-ENOENT);
-
- startprfx = endprfx = 0;
- head = 0;
- back = inode_datablocks(dir) - 1;
-
- while (head <= back) {
- const int mid = head + (back - head) / 2;
- struct page *page = read_mapping_page(mapping, mid, NULL);
-
- if (!IS_ERR(page)) {
- struct erofs_dirent *de = kmap_atomic(page);
- const int nameoff = nameoff_from_disk(de->nameoff,
- EROFS_BLKSIZ);
- const int ndirents = nameoff / sizeof(*de);
- int diff;
- unsigned int matched;
- struct erofs_qstr dname;
-
- if (unlikely(!ndirents)) {
- kunmap_atomic(de);
- put_page(page);
- errln("corrupted dir block %d @ nid %llu",
- mid, EROFS_V(dir)->nid);
- DBG_BUGON(1);
- page = ERR_PTR(-EFSCORRUPTED);
- goto out;
- }
-
- matched = min(startprfx, endprfx);
-
- dname.name = (u8 *)de + nameoff;
- if (ndirents == 1)
- dname.end = (u8 *)de + EROFS_BLKSIZ;
- else
- dname.end = (u8 *)de +
- nameoff_from_disk(de[1].nameoff,
- EROFS_BLKSIZ);
-
- /* string comparison without already matched prefix */
- diff = dirnamecmp(name, &dname, &matched);
- kunmap_atomic(de);
-
- if (unlikely(!diff)) {
- *_ndirents = 0;
- goto out;
- } else if (diff > 0) {
- head = mid + 1;
- startprfx = matched;
-
- if (!IS_ERR(candidate))
- put_page(candidate);
- candidate = page;
- *_ndirents = ndirents;
- } else {
- put_page(page);
-
- back = mid - 1;
- endprfx = matched;
- }
- continue;
- }
-out: /* free if the candidate is valid */
- if (!IS_ERR(candidate))
- put_page(candidate);
- return page;
- }
- return candidate;
-}
-
-int erofs_namei(struct inode *dir,
- struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type)
-{
- int ndirents;
- struct page *page;
- void *data;
- struct erofs_dirent *de;
- struct erofs_qstr qn;
-
- if (unlikely(!dir->i_size))
- return -ENOENT;
-
- qn.name = name->name;
- qn.end = name->name + name->len;
-
- ndirents = 0;
- page = find_target_block_classic(dir, &qn, &ndirents);
-
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- data = kmap_atomic(page);
- /* the target page has been mapped */
- if (ndirents)
- de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
- else
- de = (struct erofs_dirent *)data;
-
- if (!IS_ERR(de)) {
- *nid = le64_to_cpu(de->nid);
- *d_type = de->file_type;
- }
-
- kunmap_atomic(data);
- put_page(page);
-
- return PTR_ERR_OR_ZERO(de);
-}
-
-/* NOTE: i_mutex is already held by vfs */
-static struct dentry *erofs_lookup(struct inode *dir,
- struct dentry *dentry,
- unsigned int flags)
-{
- int err;
- erofs_nid_t nid;
- unsigned int d_type;
- struct inode *inode;
-
- DBG_BUGON(!d_really_is_negative(dentry));
-	/* dentry must be unhashed in lookup, no need to worry about it */
- DBG_BUGON(!d_unhashed(dentry));
-
- trace_erofs_lookup(dir, dentry, flags);
-
- /* file name exceeds fs limit */
- if (unlikely(dentry->d_name.len > EROFS_NAME_LEN))
- return ERR_PTR(-ENAMETOOLONG);
-
- /* false uninitialized warnings on gcc 4.8.x */
- err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
-
- if (err == -ENOENT) {
- /* negative dentry */
- inode = NULL;
- } else if (unlikely(err)) {
- inode = ERR_PTR(err);
- } else {
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- }
- return d_splice_alias(inode, dentry);
-}
-
-const struct inode_operations erofs_dir_iops = {
- .lookup = erofs_lookup,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
deleted file mode 100644
index f65a1ff9f42f..000000000000
--- a/drivers/staging/erofs/super.c
+++ /dev/null
@@ -1,666 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/super.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/statfs.h>
-#include <linux/parser.h>
-#include <linux/seq_file.h>
-#include "xattr.h"
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/erofs.h>
-
-static struct kmem_cache *erofs_inode_cachep __read_mostly;
-
-static void init_once(void *ptr)
-{
- struct erofs_vnode *vi = ptr;
-
- inode_init_once(&vi->vfs_inode);
-}
-
-static int __init erofs_init_inode_cache(void)
-{
- erofs_inode_cachep = kmem_cache_create("erofs_inode",
- sizeof(struct erofs_vnode), 0,
- SLAB_RECLAIM_ACCOUNT,
- init_once);
-
- return erofs_inode_cachep ? 0 : -ENOMEM;
-}
-
-static void erofs_exit_inode_cache(void)
-{
- kmem_cache_destroy(erofs_inode_cachep);
-}
-
-static struct inode *alloc_inode(struct super_block *sb)
-{
- struct erofs_vnode *vi =
- kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
-
- if (!vi)
- return NULL;
-
- /* zero out everything except vfs_inode */
- memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
- return &vi->vfs_inode;
-}
-
-static void free_inode(struct inode *inode)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
-
-	/* be careful of the RCU symlink path (see ext4_inode_info->i_data)! */
- if (is_inode_fast_symlink(inode))
- kfree(inode->i_link);
-
- kfree(vi->xattr_shared_xattrs);
-
- kmem_cache_free(erofs_inode_cachep, vi);
-}
-
-static bool check_layout_compatibility(struct super_block *sb,
- struct erofs_super_block *layout)
-{
- const unsigned int requirements = le32_to_cpu(layout->requirements);
-
- EROFS_SB(sb)->requirements = requirements;
-
- /* check if current kernel meets all mandatory requirements */
- if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
- errln("unidentified requirements %x, please upgrade kernel version",
- requirements & ~EROFS_ALL_REQUIREMENTS);
- return false;
- }
- return true;
-}
-
-static int superblock_read(struct super_block *sb)
-{
- struct erofs_sb_info *sbi;
- struct buffer_head *bh;
- struct erofs_super_block *layout;
- unsigned int blkszbits;
- int ret;
-
- bh = sb_bread(sb, 0);
-
- if (!bh) {
- errln("cannot read erofs superblock");
- return -EIO;
- }
-
- sbi = EROFS_SB(sb);
- layout = (struct erofs_super_block *)((u8 *)bh->b_data
- + EROFS_SUPER_OFFSET);
-
- ret = -EINVAL;
- if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
- errln("cannot find valid erofs superblock");
- goto out;
- }
-
- blkszbits = layout->blkszbits;
- /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
- if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
- errln("blksize %u isn't supported on this platform",
- 1 << blkszbits);
- goto out;
- }
-
- if (!check_layout_compatibility(sb, layout))
- goto out;
-
- sbi->blocks = le32_to_cpu(layout->blocks);
- sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
-#ifdef CONFIG_EROFS_FS_XATTR
- sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
-#endif
- sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
- sbi->root_nid = le16_to_cpu(layout->root_nid);
- sbi->inos = le64_to_cpu(layout->inos);
-
- sbi->build_time = le64_to_cpu(layout->build_time);
- sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);
-
- memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
- memcpy(sbi->volume_name, layout->volume_name,
- sizeof(layout->volume_name));
-
- ret = 0;
-out:
- brelse(bh);
- return ret;
-}
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-const char *erofs_fault_name[FAULT_MAX] = {
- [FAULT_KMALLOC] = "kmalloc",
- [FAULT_READ_IO] = "read IO error",
-};
-
-static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
- unsigned int rate)
-{
- struct erofs_fault_info *ffi = &sbi->fault_info;
-
- if (rate) {
- atomic_set(&ffi->inject_ops, 0);
- ffi->inject_rate = rate;
- ffi->inject_type = (1 << FAULT_MAX) - 1;
- } else {
- memset(ffi, 0, sizeof(struct erofs_fault_info));
- }
-
- set_opt(sbi, FAULT_INJECTION);
-}
-
-static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- int rate = 0;
-
- if (args->from && match_int(args, &rate))
- return -EINVAL;
-
- __erofs_build_fault_attr(sbi, rate);
- return 0;
-}
-
-static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
-{
- return sbi->fault_info.inject_rate;
-}
-#else
-static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
- unsigned int rate)
-{
-}
-
-static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- infoln("fault_injection options not supported");
- return 0;
-}
-
-static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_EROFS_FS_ZIP
-static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- const char *cs = match_strdup(args);
- int err = 0;
-
- if (!cs) {
- errln("Not enough memory to store cache strategy");
- return -ENOMEM;
- }
-
- if (!strcmp(cs, "disabled")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
- } else if (!strcmp(cs, "readahead")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
- } else if (!strcmp(cs, "readaround")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- } else {
- errln("Unrecognized cache strategy \"%s\"", cs);
- err = -EINVAL;
- }
- kfree(cs);
- return err;
-}
-#else
-static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- infoln("EROFS compression is disabled, so cache strategy is ignored");
- return 0;
-}
-#endif
-
-/* set up default EROFS parameters */
-static void default_options(struct erofs_sb_info *sbi)
-{
-#ifdef CONFIG_EROFS_FS_ZIP
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- sbi->max_sync_decompress_pages = 3;
-#endif
-#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(sbi, XATTR_USER);
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(sbi, POSIX_ACL);
-#endif
-}
-
-enum {
- Opt_user_xattr,
- Opt_nouser_xattr,
- Opt_acl,
- Opt_noacl,
- Opt_fault_injection,
- Opt_cache_strategy,
- Opt_err
-};
-
-static match_table_t erofs_tokens = {
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_fault_injection, "fault_injection=%u"},
- {Opt_cache_strategy, "cache_strategy=%s"},
- {Opt_err, NULL}
-};
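For instance (an illustrative command line, not taken from the patch), mounting with -o noacl,cache_strategy=readahead clears the POSIX_ACL option and selects EROFS_ZIP_CACHE_READAHEAD, while any unrecognized option makes parse_options() below fail with -EINVAL.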
-
-static int parse_options(struct super_block *sb, char *options)
-{
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int err;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ","))) {
- int token;
-
- if (!*p)
- continue;
-
- args[0].to = args[0].from = NULL;
- token = match_token(p, erofs_tokens, args);
-
- switch (token) {
-#ifdef CONFIG_EROFS_FS_XATTR
- case Opt_user_xattr:
- set_opt(EROFS_SB(sb), XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt(EROFS_SB(sb), XATTR_USER);
- break;
-#else
- case Opt_user_xattr:
- infoln("user_xattr options not supported");
- break;
- case Opt_nouser_xattr:
- infoln("nouser_xattr options not supported");
- break;
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- case Opt_acl:
- set_opt(EROFS_SB(sb), POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(EROFS_SB(sb), POSIX_ACL);
- break;
-#else
- case Opt_acl:
- infoln("acl options not supported");
- break;
- case Opt_noacl:
- infoln("noacl options not supported");
- break;
-#endif
- case Opt_fault_injection:
- err = erofs_build_fault_attr(EROFS_SB(sb), args);
- if (err)
- return err;
- break;
- case Opt_cache_strategy:
- err = erofs_build_cache_strategy(EROFS_SB(sb), args);
- if (err)
- return err;
- break;
- default:
- errln("Unrecognized mount option \"%s\" or missing value", p);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
-{
- int ret = 1; /* 0 - busy */
- struct address_space *const mapping = page->mapping;
-
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
- if (PagePrivate(page))
- ret = erofs_try_to_free_cached_page(mapping, page);
-
- return ret;
-}
-
-static void managed_cache_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- const unsigned int stop = length + offset;
-
- DBG_BUGON(!PageLocked(page));
-
- /* Check for potential overflow in debug mode */
- DBG_BUGON(stop > PAGE_SIZE || stop < length);
-
- if (offset == 0 && stop == PAGE_SIZE)
- while (!managed_cache_releasepage(page, GFP_NOFS))
- cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
- .releasepage = managed_cache_releasepage,
- .invalidatepage = managed_cache_invalidatepage,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct inode *const inode = new_inode(sb);
-
- if (unlikely(!inode))
- return -ENOMEM;
-
- set_nlink(inode, 1);
- inode->i_size = OFFSET_MAX;
-
- inode->i_mapping->a_ops = &managed_cache_aops;
- mapping_set_gfp_mask(inode->i_mapping,
- GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
- sbi->managed_cache = inode;
- return 0;
-}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
-
-static int erofs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct inode *inode;
- struct erofs_sb_info *sbi;
- int err;
-
- infoln("fill_super, device -> %s", sb->s_id);
- infoln("options -> %s", (char *)data);
-
- sb->s_magic = EROFS_SUPER_MAGIC;
-
- if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
- errln("failed to set erofs blksize");
- return -EINVAL;
- }
-
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (unlikely(!sbi))
- return -ENOMEM;
-
- sb->s_fs_info = sbi;
- err = superblock_read(sb);
- if (err)
- return err;
-
- sb->s_flags |= SB_RDONLY | SB_NOATIME;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_time_gran = 1;
-
- sb->s_op = &erofs_sops;
-
-#ifdef CONFIG_EROFS_FS_XATTR
- sb->s_xattr = erofs_xattr_handlers;
-#endif
- /* set erofs default mount options */
- default_options(sbi);
-
- err = parse_options(sb, data);
- if (unlikely(err))
- return err;
-
- if (!silent)
- infoln("root inode @ nid %llu", ROOT_NID(sbi));
-
- if (test_opt(sbi, POSIX_ACL))
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
-
-#ifdef CONFIG_EROFS_FS_ZIP
- INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
-#endif
-
- /* get the root inode */
- inode = erofs_iget(sb, ROOT_NID(sbi), true);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- if (unlikely(!S_ISDIR(inode->i_mode))) {
- errln("rootino(nid %llu) is not a directory(i_mode %o)",
- ROOT_NID(sbi), inode->i_mode);
- iput(inode);
- return -EINVAL;
- }
-
- sb->s_root = d_make_root(inode);
- if (unlikely(!sb->s_root))
- return -ENOMEM;
-
- erofs_shrinker_register(sb);
- /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
- err = erofs_init_managed_cache(sb);
- if (unlikely(err))
- return err;
-
- if (!silent)
- infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data);
- return 0;
-}
-
-static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
-}
-
-/*
- * could be triggered after deactivate_locked_super()
- * is called, which covers both umount and failure to initialize.
- */
-static void erofs_kill_sb(struct super_block *sb)
-{
- struct erofs_sb_info *sbi;
-
- WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
- infoln("unmounting for %s", sb->s_id);
-
- kill_block_super(sb);
-
- sbi = EROFS_SB(sb);
- if (!sbi)
- return;
- kfree(sbi);
- sb->s_fs_info = NULL;
-}
-
-/* called when ->s_root is non-NULL */
-static void erofs_put_super(struct super_block *sb)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
-
- DBG_BUGON(!sbi);
-
- erofs_shrinker_unregister(sb);
-#ifdef CONFIG_EROFS_FS_ZIP
- iput(sbi->managed_cache);
- sbi->managed_cache = NULL;
-#endif
-}
-
-static struct file_system_type erofs_fs_type = {
- .owner = THIS_MODULE,
- .name = "erofs",
- .mount = erofs_mount,
- .kill_sb = erofs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("erofs");
-
-static int __init erofs_module_init(void)
-{
- int err;
-
- erofs_check_ondisk_layout_definitions();
- infoln("initializing erofs " EROFS_VERSION);
-
- err = erofs_init_inode_cache();
- if (err)
- goto icache_err;
-
- err = erofs_init_shrinker();
- if (err)
- goto shrinker_err;
-
- err = z_erofs_init_zip_subsystem();
- if (err)
- goto zip_err;
-
- err = register_filesystem(&erofs_fs_type);
- if (err)
- goto fs_err;
-
-	infoln("successfully initialized erofs");
- return 0;
-
-fs_err:
- z_erofs_exit_zip_subsystem();
-zip_err:
- erofs_exit_shrinker();
-shrinker_err:
- erofs_exit_inode_cache();
-icache_err:
- return err;
-}
-
-static void __exit erofs_module_exit(void)
-{
- unregister_filesystem(&erofs_fs_type);
- z_erofs_exit_zip_subsystem();
- erofs_exit_shrinker();
- erofs_exit_inode_cache();
-	infoln("successfully finalized erofs");
-}
-
-/* get filesystem statistics */
-static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
-
- buf->f_type = sb->s_magic;
- buf->f_bsize = EROFS_BLKSIZ;
- buf->f_blocks = sbi->blocks;
- buf->f_bfree = buf->f_bavail = 0;
-
- buf->f_files = ULLONG_MAX;
- buf->f_ffree = ULLONG_MAX - sbi->inos;
-
- buf->f_namelen = EROFS_NAME_LEN;
-
- buf->f_fsid.val[0] = (u32)id;
- buf->f_fsid.val[1] = (u32)(id >> 32);
- return 0;
-}
-
-static int erofs_show_options(struct seq_file *seq, struct dentry *root)
-{
- struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
-
-#ifdef CONFIG_EROFS_FS_XATTR
- if (test_opt(sbi, XATTR_USER))
- seq_puts(seq, ",user_xattr");
- else
- seq_puts(seq, ",nouser_xattr");
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- if (test_opt(sbi, POSIX_ACL))
- seq_puts(seq, ",acl");
- else
- seq_puts(seq, ",noacl");
-#endif
- if (test_opt(sbi, FAULT_INJECTION))
- seq_printf(seq, ",fault_injection=%u",
- erofs_get_fault_rate(sbi));
-#ifdef CONFIG_EROFS_FS_ZIP
- if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
- seq_puts(seq, ",cache_strategy=disabled");
- } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
- seq_puts(seq, ",cache_strategy=readahead");
- } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
- seq_puts(seq, ",cache_strategy=readaround");
- } else {
- seq_puts(seq, ",cache_strategy=(unknown)");
- DBG_BUGON(1);
- }
-#endif
- return 0;
-}
-
-static int erofs_remount(struct super_block *sb, int *flags, char *data)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- unsigned int org_mnt_opt = sbi->mount_opt;
- unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
- int err;
-
- DBG_BUGON(!sb_rdonly(sb));
- err = parse_options(sb, data);
- if (err)
- goto out;
-
- if (test_opt(sbi, POSIX_ACL))
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
-
- *flags |= SB_RDONLY;
- return 0;
-out:
- __erofs_build_fault_attr(sbi, org_inject_rate);
- sbi->mount_opt = org_mnt_opt;
-
- return err;
-}
-
-const struct super_operations erofs_sops = {
- .put_super = erofs_put_super,
- .alloc_inode = alloc_inode,
- .free_inode = free_inode,
- .statfs = erofs_statfs,
- .show_options = erofs_show_options,
- .remount_fs = erofs_remount,
-};
-
-module_init(erofs_module_init);
-module_exit(erofs_module_exit);
-
-MODULE_DESCRIPTION("Enhanced ROM File System");
-MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/erofs/tagptr.h b/drivers/staging/erofs/tagptr.h
deleted file mode 100644
index a72897c86744..000000000000
--- a/drivers/staging/erofs/tagptr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * A tagged pointer implementation
- *
- * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_TAGPTR_H
-#define __EROFS_FS_TAGPTR_H
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/*
- * the name of tagged pointer types are tagptr{1, 2, 3...}_t
- * avoid directly using the internal structs __tagptr{1, 2, 3...}
- */
-#define __MAKE_TAGPTR(n) \
-typedef struct __tagptr##n { \
- uintptr_t v; \
-} tagptr##n##_t;
-
-__MAKE_TAGPTR(1)
-__MAKE_TAGPTR(2)
-__MAKE_TAGPTR(3)
-__MAKE_TAGPTR(4)
-
-#undef __MAKE_TAGPTR
-
-extern void __compiletime_error("bad tagptr tags")
- __bad_tagptr_tags(void);
-
-extern void __compiletime_error("bad tagptr type")
- __bad_tagptr_type(void);
-
-/* catch broken usage such as "#define tagptr2_t tagptr3_t" by users */
-#define __tagptr_mask_1(ptr, n) \
- __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
- (1UL << (n)) - 1 :
-
-#define __tagptr_mask(ptr) (\
- __tagptr_mask_1(ptr, 1) ( \
- __tagptr_mask_1(ptr, 2) ( \
- __tagptr_mask_1(ptr, 3) ( \
- __tagptr_mask_1(ptr, 4) ( \
- __bad_tagptr_type(), 0)))))
-
-/* generate a tagged pointer from a raw value */
-#define tagptr_init(type, val) \
- ((typeof(type)){ .v = (uintptr_t)(val) })
-
-/*
- * directly cast a tagged pointer to the native pointer type, which
- * could be used for backward compatibility of existing code.
- */
-#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
-
-/* encode tagged pointers */
-#define tagptr_fold(type, ptr, _tags) ({ \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
- __bad_tagptr_tags(); \
-tagptr_init(type, (uintptr_t)(ptr) | tags); })
-
-/* decode tagged pointers */
-#define tagptr_unfold_ptr(tptr) \
- ((void *)((tptr).v & ~__tagptr_mask(tptr)))
-
-#define tagptr_unfold_tags(tptr) \
- ((tptr).v & __tagptr_mask(tptr))
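A minimal sketch (not part of the original header) of folding and unfolding with a one-bit tag; the pointer only needs to be at least 2-byte aligned so that its low bit is free:

static void tagptr_example(void *ptr)
{
	/* pack "pointer + busy bit" into one word */
	tagptr1_t t = tagptr_fold(tagptr1_t, ptr, 1);
	void *back = tagptr_unfold_ptr(t);	/* == ptr again */
	uintptr_t busy = tagptr_unfold_tags(t);	/* == 1 */

	(void)back;
	(void)busy;
}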
-
-/* operations for the tagged pointer */
-#define tagptr_eq(_tptr1, _tptr2) ({ \
- typeof(_tptr1) tptr1 = (_tptr1); \
- typeof(_tptr2) tptr2 = (_tptr2); \
- (void)(&tptr1 == &tptr2); \
-(tptr1).v == (tptr2).v; })
-
-/* lock-free CAS operation */
-#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- typeof(_o) o = (_o); \
- typeof(_n) n = (_n); \
- (void)(&o == &n); \
- (void)(&o == ptptr); \
-tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
-
-/* wrap WRITE_ONCE if atomic update is needed */
-#define tagptr_replace_tags(_ptptr, tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
-*ptptr; })
-
-#define tagptr_set_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v |= tags; \
-*ptptr; })
-
-#define tagptr_clear_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v &= ~tags; \
-*ptptr; })
-
-#endif /* __EROFS_FS_TAGPTR_H */
-
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
deleted file mode 100644
index 814c2ee037ae..000000000000
--- a/drivers/staging/erofs/utils.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/utils.c
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "internal.h"
-#include <linux/pagevec.h>
-
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
-{
- struct page *page;
-
- if (!list_empty(pool)) {
- page = lru_to_page(pool);
- DBG_BUGON(page_ref_count(page) != 1);
- list_del(&page->lru);
- } else {
- page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
- }
- return page;
-}
-
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-static struct {
- u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
-} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
-
-void *erofs_get_pcpubuf(unsigned int pagenr)
-{
- preempt_disable();
- return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
-}
-#endif
-
-#ifdef CONFIG_EROFS_FS_ZIP
-/* global shrink count (for all mounted EROFS instances) */
-static atomic_long_t erofs_global_shrink_cnt;
-
-#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
-#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
-
-static int erofs_workgroup_get(struct erofs_workgroup *grp)
-{
- int o;
-
-repeat:
- o = erofs_wait_on_workgroup_freezed(grp);
- if (unlikely(o <= 0))
- return -1;
-
- if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
- goto repeat;
-
-	/* decrease refcount paired with erofs_workgroup_put */
- if (unlikely(o == 1))
- atomic_long_dec(&erofs_global_shrink_cnt);
- return 0;
-}
-
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_workgroup *grp;
-
-repeat:
- rcu_read_lock();
- grp = radix_tree_lookup(&sbi->workstn_tree, index);
- if (grp) {
- *tag = xa_pointer_tag(grp);
- grp = xa_untag_pointer(grp);
-
- if (erofs_workgroup_get(grp)) {
- /* prefer to relax rcu read side */
- rcu_read_unlock();
- goto repeat;
- }
-
- DBG_BUGON(index != grp->index);
- }
- rcu_read_unlock();
- return grp;
-}
-
-int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp,
- bool tag)
-{
- struct erofs_sb_info *sbi;
- int err;
-
- /* grp shouldn't be broken or used before */
- if (unlikely(atomic_read(&grp->refcount) != 1)) {
- DBG_BUGON(1);
- return -EINVAL;
- }
-
- err = radix_tree_preload(GFP_NOFS);
- if (err)
- return err;
-
- sbi = EROFS_SB(sb);
- xa_lock(&sbi->workstn_tree);
-
- grp = xa_tag_pointer(grp, tag);
-
- /*
- * Bump up reference count before making this workgroup
- * visible to other users in order to avoid potential UAF
-	 * without being serialized by workstn_lock.
- */
- __erofs_workgroup_get(grp);
-
- err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
- if (unlikely(err))
- /*
- * it's safe to decrease since the workgroup isn't visible
-		 * and refcount >= 2 (cannot be frozen).
- */
- __erofs_workgroup_put(grp);
-
- xa_unlock(&sbi->workstn_tree);
- radix_tree_preload_end();
- return err;
-}
-
-static void __erofs_workgroup_free(struct erofs_workgroup *grp)
-{
- atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
-}
-
-int erofs_workgroup_put(struct erofs_workgroup *grp)
-{
- int count = atomic_dec_return(&grp->refcount);
-
- if (count == 1)
- atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count)
- __erofs_workgroup_free(grp);
- return count;
-}
-
-static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
-{
- erofs_workgroup_unfreeze(grp, 0);
- __erofs_workgroup_free(grp);
-}
-
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
-{
- /*
- * If managed cache is on, refcount of workgroups
-	 * themselves could be < 0 (frozen). In other words,
-	 * there is no guarantee that all refcounts are > 0.
- */
- if (!erofs_workgroup_try_to_freeze(grp, 1))
- return false;
-
- /*
- * Note that all cached pages should be unattached
-	 * before being deleted from the radix tree. Otherwise some
-	 * cached pages could still be attached to the orphan
- * old workgroup when the new one is available in the tree.
- */
- if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
- erofs_workgroup_unfreeze(grp, 1);
- return false;
- }
-
- /*
-	 * It's impossible to fail after the workgroup is frozen,
-	 * but in order to avoid some race conditions, add a
- * DBG_BUGON to observe this in advance.
- */
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
-
- /*
- * If managed cache is on, last refcount should indicate
- * the related workstation.
- */
- erofs_workgroup_unfreeze_final(grp);
- return true;
-}
-
-static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink,
- bool cleanup)
-{
- pgoff_t first_index = 0;
- void *batch[PAGEVEC_SIZE];
- unsigned int freed = 0;
-
- int i, found;
-repeat:
- xa_lock(&sbi->workstn_tree);
-
- found = radix_tree_gang_lookup(&sbi->workstn_tree,
- batch, first_index, PAGEVEC_SIZE);
-
- for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
-
- first_index = grp->index + 1;
-
- /* try to shrink each valid workgroup */
- if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
- continue;
-
- ++freed;
- if (unlikely(!--nr_shrink))
- break;
- }
- xa_unlock(&sbi->workstn_tree);
-
- if (i && nr_shrink)
- goto repeat;
- return freed;
-}
-
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
-
-/* protects the mounted 'erofs_sb_list' */
-static DEFINE_SPINLOCK(erofs_sb_list_lock);
-static LIST_HEAD(erofs_sb_list);
-
-void erofs_shrinker_register(struct super_block *sb)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
-
- mutex_init(&sbi->umount_mutex);
-
- spin_lock(&erofs_sb_list_lock);
- list_add(&sbi->list, &erofs_sb_list);
- spin_unlock(&erofs_sb_list_lock);
-}
-
-void erofs_shrinker_unregister(struct super_block *sb)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
-
- mutex_lock(&sbi->umount_mutex);
- erofs_shrink_workstation(sbi, ~0UL, true);
-
- spin_lock(&erofs_sb_list_lock);
- list_del(&sbi->list);
- spin_unlock(&erofs_sb_list_lock);
- mutex_unlock(&sbi->umount_mutex);
-}
-
-static unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- return atomic_long_read(&erofs_global_shrink_cnt);
-}
-
-static unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct erofs_sb_info *sbi;
- struct list_head *p;
-
- unsigned long nr = sc->nr_to_scan;
- unsigned int run_no;
- unsigned long freed = 0;
-
- spin_lock(&erofs_sb_list_lock);
- do {
- run_no = ++shrinker_run_no;
- } while (run_no == 0);
-
- /* Iterate over all mounted superblocks and try to shrink them */
- p = erofs_sb_list.next;
- while (p != &erofs_sb_list) {
- sbi = list_entry(p, struct erofs_sb_info, list);
-
- /*
- * We move the ones we do to the end of the list, so we stop
- * when we see one we have already done.
- */
- if (sbi->shrinker_run_no == run_no)
- break;
-
- if (!mutex_trylock(&sbi->umount_mutex)) {
- p = p->next;
- continue;
- }
-
- spin_unlock(&erofs_sb_list_lock);
- sbi->shrinker_run_no = run_no;
-
- freed += erofs_shrink_workstation(sbi, nr, false);
-
- spin_lock(&erofs_sb_list_lock);
- /* Get the next list element before we move this one */
- p = p->next;
-
- /*
- * Move this one to the end of the list to provide some
- * fairness.
- */
- list_move_tail(&sbi->list, &erofs_sb_list);
- mutex_unlock(&sbi->umount_mutex);
-
- if (freed >= nr)
- break;
- }
- spin_unlock(&erofs_sb_list_lock);
- return freed;
-}
-
-static struct shrinker erofs_shrinker_info = {
- .scan_objects = erofs_shrink_scan,
- .count_objects = erofs_shrink_count,
- .seeks = DEFAULT_SEEKS,
-};
-
-int __init erofs_init_shrinker(void)
-{
- return register_shrinker(&erofs_shrinker_info);
-}
-
-void erofs_exit_shrinker(void)
-{
- unregister_shrinker(&erofs_shrinker_info);
-}
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
deleted file mode 100644
index e7e5840e3f9d..000000000000
--- a/drivers/staging/erofs/xattr.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/xattr.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include <linux/security.h>
-#include "xattr.h"
-
-struct xattr_iter {
- struct super_block *sb;
- struct page *page;
- void *kaddr;
-
- erofs_blk_t blkaddr;
- unsigned int ofs;
-};
-
-static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
-{
- /* the only user of kunmap() is 'init_inode_xattrs' */
- if (unlikely(!atomic))
- kunmap(it->page);
- else
- kunmap_atomic(it->kaddr);
-
- unlock_page(it->page);
- put_page(it->page);
-}
-
-static inline void xattr_iter_end_final(struct xattr_iter *it)
-{
- if (!it->page)
- return;
-
- xattr_iter_end(it, true);
-}
-
-static int init_inode_xattrs(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct xattr_iter it;
- unsigned int i;
- struct erofs_xattr_ibody_header *ih;
- struct super_block *sb;
- struct erofs_sb_info *sbi;
- bool atomic_map;
- int ret = 0;
-
-	/* the common case is that xattrs of this inode have been initialized */
- if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
- return 0;
-
- if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
- return -ERESTARTSYS;
-
- /* someone has initialized xattrs for us? */
- if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
- goto out_unlock;
-
-	/*
-	 * bypass all xattr operations if ->xattr_isize is not greater than
-	 * sizeof(struct erofs_xattr_ibody_header), in detail:
-	 * 1) if it is not large enough to contain erofs_xattr_ibody_header,
-	 *    then ->xattr_isize should be 0 (it means no xattr);
-	 * 2) if it is just large enough to contain erofs_xattr_ibody_header,
-	 *    that on-disk layout is undefined right now (maybe used later
-	 *    with some new sb feature).
-	 */
- if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
- errln("xattr_isize %d of nid %llu is not supported yet",
- vi->xattr_isize, vi->nid);
- ret = -EOPNOTSUPP;
- goto out_unlock;
- } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
- if (unlikely(vi->xattr_isize)) {
- errln("bogus xattr ibody @ nid %llu", vi->nid);
- DBG_BUGON(1);
- ret = -EFSCORRUPTED;
- goto out_unlock; /* xattr ondisk layout error */
- }
- ret = -ENOATTR;
- goto out_unlock;
- }
-
- sb = inode->i_sb;
- sbi = EROFS_SB(sb);
- it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
- it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
-
- it.page = erofs_get_inline_page(inode, it.blkaddr);
- if (IS_ERR(it.page)) {
- ret = PTR_ERR(it.page);
- goto out_unlock;
- }
-
- /* read in shared xattr array (non-atomic, see kmalloc below) */
- it.kaddr = kmap(it.page);
- atomic_map = false;
-
- ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
-
- vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
- sizeof(uint), GFP_KERNEL);
- if (!vi->xattr_shared_xattrs) {
- xattr_iter_end(&it, atomic_map);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- /* let's skip ibody header */
- it.ofs += sizeof(struct erofs_xattr_ibody_header);
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
- /* cannot be unaligned */
- DBG_BUGON(it.ofs != EROFS_BLKSIZ);
- xattr_iter_end(&it, atomic_map);
-
- it.page = erofs_get_meta_page(sb, ++it.blkaddr,
- S_ISDIR(inode->i_mode));
- if (IS_ERR(it.page)) {
- kfree(vi->xattr_shared_xattrs);
- vi->xattr_shared_xattrs = NULL;
- ret = PTR_ERR(it.page);
- goto out_unlock;
- }
-
- it.kaddr = kmap_atomic(it.page);
- atomic_map = true;
- it.ofs = 0;
- }
- vi->xattr_shared_xattrs[i] =
- le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
- it.ofs += sizeof(__le32);
- }
- xattr_iter_end(&it, atomic_map);
-
- set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
-
-out_unlock:
- clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
- return ret;
-}
-
-/*
- * the general idea of these return values is:
- * if 0 is returned, go on processing the current xattr;
- * if 1 (> 0) is returned, skip this round and process the next xattr;
- * if -err (< 0) is returned, an error (maybe ENOATTR) occurred
- * and needs to be handled
- */
-struct xattr_iter_handlers {
- int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
- int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
- int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
- void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
-};
-
-static inline int xattr_iter_fixup(struct xattr_iter *it)
-{
- if (it->ofs < EROFS_BLKSIZ)
- return 0;
-
- xattr_iter_end(it, true);
-
- it->blkaddr += erofs_blknr(it->ofs);
-
- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
- if (IS_ERR(it->page)) {
- int err = PTR_ERR(it->page);
-
- it->page = NULL;
- return err;
- }
-
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
- return 0;
-}
-
-static int inline_xattr_iter_begin(struct xattr_iter *it,
- struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
- unsigned int xattr_header_sz, inline_xattr_ofs;
-
- xattr_header_sz = inlinexattr_header_size(inode);
- if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
- DBG_BUGON(xattr_header_sz > vi->xattr_isize);
- return -ENOATTR;
- }
-
- inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
-
- it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
- it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
-
- it->page = erofs_get_inline_page(inode, it->blkaddr);
- if (IS_ERR(it->page))
- return PTR_ERR(it->page);
-
- it->kaddr = kmap_atomic(it->page);
- return vi->xattr_isize - xattr_header_sz;
-}
-
-/*
- * Regardless of success or failure, `xattr_foreach' will end up with
- * `ofs' pointing to the next xattr item rather than an arbitrary position.
- */
-static int xattr_foreach(struct xattr_iter *it,
- const struct xattr_iter_handlers *op,
- unsigned int *tlimit)
-{
- struct erofs_xattr_entry entry;
- unsigned int value_sz, processed, slice;
- int err;
-
- /* 0. fixup blkaddr, ofs, ipage */
- err = xattr_iter_fixup(it);
- if (err)
- return err;
-
- /*
- * 1. read the xattr entry into memory; since entries are
- * EROFS_XATTR_ALIGNed on disk, the fixed-size entry itself
- * cannot cross a page boundary and resides within this page
- */
- entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
- if (tlimit) {
- unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
-
- /* xattr on-disk corruption: xattr entry beyond xattr_isize */
- if (unlikely(*tlimit < entry_sz)) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- *tlimit -= entry_sz;
- }
-
- it->ofs += sizeof(struct erofs_xattr_entry);
- value_sz = le16_to_cpu(entry.e_value_size);
-
- /* handle entry */
- err = op->entry(it, &entry);
- if (err) {
- it->ofs += entry.e_name_len + value_sz;
- goto out;
- }
-
- /* 2. handle xattr name (ofs will finally be at the end of name) */
- processed = 0;
-
- while (processed < entry.e_name_len) {
- if (it->ofs >= EROFS_BLKSIZ) {
- DBG_BUGON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- entry.e_name_len - processed);
-
- /* handle name */
- err = op->name(it, processed, it->kaddr + it->ofs, slice);
- if (err) {
- it->ofs += entry.e_name_len - processed + value_sz;
- goto out;
- }
-
- it->ofs += slice;
- processed += slice;
- }
-
- /* 3. handle xattr value */
- processed = 0;
-
- if (op->alloc_buffer) {
- err = op->alloc_buffer(it, value_sz);
- if (err) {
- it->ofs += value_sz;
- goto out;
- }
- }
-
- while (processed < value_sz) {
- if (it->ofs >= EROFS_BLKSIZ) {
- DBG_BUGON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- value_sz - processed);
- op->value(it, processed, it->kaddr + it->ofs, slice);
- it->ofs += slice;
- processed += slice;
- }
-
-out:
- /* xattrs should be 4-byte aligned (on-disk constraint) */
- it->ofs = EROFS_XATTR_ALIGN(it->ofs);
- return err < 0 ? err : 0;
-}
-
-struct getxattr_iter {
- struct xattr_iter it;
-
- char *buffer;
- int buffer_size, index;
- struct qstr name;
-};
-
-static int xattr_entrymatch(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return (it->index != entry->e_name_index ||
- it->name.len != entry->e_name_len) ? -ENOATTR : 0;
-}
-
-static int xattr_namematch(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
-}
-
-static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
- int err = it->buffer_size < value_sz ? -ERANGE : 0;
-
- it->buffer_size = value_sz;
- return !it->buffer ? 1 : err;
-}
-
-static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned int processed,
- char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- memcpy(it->buffer + processed, buf, len);
-}
-
-static const struct xattr_iter_handlers find_xattr_handlers = {
- .entry = xattr_entrymatch,
- .name = xattr_namematch,
- .alloc_buffer = xattr_checkbuffer,
- .value = xattr_copyvalue
-};
-
-static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, inode);
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
- if (ret != -ENOATTR)
- break;
- }
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
-static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = -ENOATTR;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
-
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
-
- it->it.page = erofs_get_meta_page(sb, blkaddr, false);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
-
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
-
- ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
- if (ret != -ENOATTR)
- break;
- }
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
-static bool erofs_xattr_user_list(struct dentry *dentry)
-{
- return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
-}
-
-static bool erofs_xattr_trusted_list(struct dentry *dentry)
-{
- return capable(CAP_SYS_ADMIN);
-}
-
-int erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
-{
- int ret;
- struct getxattr_iter it;
-
- if (unlikely(!name))
- return -EINVAL;
-
- ret = init_inode_xattrs(inode);
- if (ret)
- return ret;
-
- it.index = index;
-
- it.name.len = strlen(name);
- if (it.name.len > EROFS_NAME_LEN)
- return -ERANGE;
- it.name.name = name;
-
- it.buffer = buffer;
- it.buffer_size = buffer_size;
-
- it.it.sb = inode->i_sb;
- ret = inline_getxattr(inode, &it);
- if (ret == -ENOATTR)
- ret = shared_getxattr(inode, &it);
- return ret;
-}
-
-static int erofs_xattr_generic_get(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
- switch (handler->flags) {
- case EROFS_XATTR_INDEX_USER:
- if (!test_opt(sbi, XATTR_USER))
- return -EOPNOTSUPP;
- break;
- case EROFS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
- case EROFS_XATTR_INDEX_SECURITY:
- break;
- default:
- return -EINVAL;
- }
-
- return erofs_getxattr(inode, handler->flags, name, buffer, size);
-}
-
-const struct xattr_handler erofs_xattr_user_handler = {
- .prefix = XATTR_USER_PREFIX,
- .flags = EROFS_XATTR_INDEX_USER,
- .list = erofs_xattr_user_list,
- .get = erofs_xattr_generic_get,
-};
-
-const struct xattr_handler erofs_xattr_trusted_handler = {
- .prefix = XATTR_TRUSTED_PREFIX,
- .flags = EROFS_XATTR_INDEX_TRUSTED,
- .list = erofs_xattr_trusted_list,
- .get = erofs_xattr_generic_get,
-};
-
-#ifdef CONFIG_EROFS_FS_SECURITY
-const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .flags = EROFS_XATTR_INDEX_SECURITY,
- .get = erofs_xattr_generic_get,
-};
-#endif
-
-const struct xattr_handler *erofs_xattr_handlers[] = {
- &erofs_xattr_user_handler,
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
- &erofs_xattr_trusted_handler,
-#ifdef CONFIG_EROFS_FS_SECURITY
- &erofs_xattr_security_handler,
-#endif
- NULL,
-};
-
-struct listxattr_iter {
- struct xattr_iter it;
-
- struct dentry *dentry;
- char *buffer;
- int buffer_size, buffer_ofs;
-};
-
-static int xattr_entrylist(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
- unsigned int prefix_len;
- const char *prefix;
-
- const struct xattr_handler *h =
- erofs_xattr_handler(entry->e_name_index);
-
- if (!h || (h->list && !h->list(it->dentry)))
- return 1;
-
- prefix = xattr_prefix(h);
- prefix_len = strlen(prefix);
-
- if (!it->buffer) {
- it->buffer_ofs += prefix_len + entry->e_name_len + 1;
- return 1;
- }
-
- if (it->buffer_ofs + prefix_len
- + entry->e_name_len + 1 > it->buffer_size)
- return -ERANGE;
-
- memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
- it->buffer_ofs += prefix_len;
- return 0;
-}
-
-static int xattr_namelist(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
-
- memcpy(it->buffer + it->buffer_ofs, buf, len);
- it->buffer_ofs += len;
- return 0;
-}
-
-static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
-
- it->buffer[it->buffer_ofs++] = '\0';
- return 1;
-}
-
-static const struct xattr_iter_handlers list_xattr_handlers = {
- .entry = xattr_entrylist,
- .name = xattr_namelist,
- .alloc_buffer = xattr_skipvalue,
- .value = NULL
-};
-
-static int inline_listxattr(struct listxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
- if (ret)
- break;
- }
- xattr_iter_end_final(&it->it);
- return ret ? ret : it->buffer_ofs;
-}
-
-static int shared_listxattr(struct listxattr_iter *it)
-{
- struct inode *const inode = d_inode(it->dentry);
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
-
- it->it.page = erofs_get_meta_page(sb, blkaddr, false);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
-
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
-
- ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
- if (ret)
- break;
- }
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_ofs;
-}
-
-ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
-{
- int ret;
- struct listxattr_iter it;
-
- ret = init_inode_xattrs(d_inode(dentry));
- if (ret)
- return ret;
-
- it.dentry = dentry;
- it.buffer = buffer;
- it.buffer_size = buffer_size;
- it.buffer_ofs = 0;
-
- it.it.sb = dentry->d_sb;
-
- ret = inline_listxattr(&it);
- if (ret < 0 && ret != -ENOATTR)
- return ret;
- return shared_listxattr(&it);
-}
-
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-struct posix_acl *erofs_get_acl(struct inode *inode, int type)
-{
- struct posix_acl *acl;
- int prefix, rc;
- char *value = NULL;
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
- break;
- case ACL_TYPE_DEFAULT:
- prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- rc = erofs_getxattr(inode, prefix, "", NULL, 0);
- if (rc > 0) {
- value = kmalloc(rc, GFP_KERNEL);
- if (!value)
- return ERR_PTR(-ENOMEM);
- rc = erofs_getxattr(inode, prefix, "", value, rc);
- }
-
- if (rc == -ENOATTR)
- acl = NULL;
- else if (rc < 0)
- acl = ERR_PTR(rc);
- else
- acl = posix_acl_from_xattr(&init_user_ns, value, rc);
- kfree(value);
- return acl;
-}
-#endif
-
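Within the deleted xattr_foreach() above, `ofs' is advanced past the fixed entry header, the name and the value, and is then rounded up with EROFS_XATTR_ALIGN (xattrs are 4-byte aligned on disk, per the comment at the end of the function). A minimal standalone sketch of that advance follows; the 4-byte entry-header size is an assumption made only for illustration:

/* hedged sketch of how xattr_foreach() steps 'ofs' to the next entry;
 * ENTRY_HDR_SZ (i.e. sizeof(struct erofs_xattr_entry)) = 4 is assumed */
#define XATTR_ALIGN(x)  (((x) + 3) & ~3u)       /* 4-byte on-disk alignment */
#define ENTRY_HDR_SZ    4u

static unsigned int next_xattr_ofs(unsigned int ofs,
                                   unsigned int name_len,
                                   unsigned int value_sz)
{
        /* header + name + value, then round up to the alignment boundary */
        return XATTR_ALIGN(ofs + ENTRY_HDR_SZ + name_len + value_sz);
}

/* e.g. ofs 32, name_len 7, value_sz 5 -> 32 + 4 + 7 + 5 = 48, already aligned */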
diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h
deleted file mode 100644
index e20249647541..000000000000
--- a/drivers/staging/erofs/xattr.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/staging/erofs/xattr.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_XATTR_H
-#define __EROFS_XATTR_H
-
-#include "internal.h"
-#include <linux/posix_acl_xattr.h>
-#include <linux/xattr.h>
-
-/* Attribute not found */
-#define ENOATTR ENODATA
-
-static inline unsigned int inlinexattr_header_size(struct inode *inode)
-{
- return sizeof(struct erofs_xattr_ibody_header)
- + sizeof(u32) * EROFS_V(inode)->xattr_shared_count;
-}
-
-static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
- unsigned int xattr_id)
-{
-#ifdef CONFIG_EROFS_FS_XATTR
- return sbi->xattr_blkaddr +
- xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
-#else
- return 0;
-#endif
-}
-
-static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
- unsigned int xattr_id)
-{
- return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
-}
-
-#ifdef CONFIG_EROFS_FS_XATTR
-extern const struct xattr_handler erofs_xattr_user_handler;
-extern const struct xattr_handler erofs_xattr_trusted_handler;
-#ifdef CONFIG_EROFS_FS_SECURITY
-extern const struct xattr_handler erofs_xattr_security_handler;
-#endif
-
-static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
-{
-static const struct xattr_handler *xattr_handler_map[] = {
- [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
-#endif
- [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
-#ifdef CONFIG_EROFS_FS_SECURITY
- [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
-#endif
-};
-
- return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
- xattr_handler_map[idx] : NULL;
-}
-
-extern const struct xattr_handler *erofs_xattr_handlers[];
-
-int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
-ssize_t erofs_listxattr(struct dentry *, char *, size_t);
-#else
-static inline int erofs_getxattr(struct inode *inode, int index,
- const char *name, void *buffer,
- size_t buffer_size)
-{
- return -EOPNOTSUPP;
-}
-
-static inline ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
-{
- return -EOPNOTSUPP;
-}
-#endif /* !CONFIG_EROFS_FS_XATTR */
-
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-struct posix_acl *erofs_get_acl(struct inode *inode, int type);
-#else
-#define erofs_get_acl (NULL)
-#endif
-
-#endif
-
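The xattrblock_addr()/xattrblock_offset() helpers in the deleted header treat a shared xattr id as an index into a flat array of 32-bit slots that starts at sbi->xattr_blkaddr. A quick worked sketch of that mapping; the 4096-byte block size stands in for EROFS_BLKSIZ and is an assumption for this example:

#include <stdint.h>

enum { BLKSIZ = 4096 };         /* assumed EROFS_BLKSIZ for the example */

/* shared xattr id -> (meta block, byte offset within it), mirroring
 * xattrblock_addr()/xattrblock_offset() above */
static void locate_shared_xattr(uint32_t xattr_blkaddr, uint32_t id,
                                uint32_t *blk, uint32_t *off)
{
        *blk = xattr_blkaddr + id * sizeof(uint32_t) / BLKSIZ;
        *off = id * sizeof(uint32_t) % BLKSIZ;
}

/* e.g. id 1500: byte 6000 -> block xattr_blkaddr + 1, offset 1904 */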
diff --git a/drivers/staging/erofs/zdata.c b/drivers/staging/erofs/zdata.c
deleted file mode 100644
index 2d7aaf98f7de..000000000000
--- a/drivers/staging/erofs/zdata.c
+++ /dev/null
@@ -1,1405 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/zdata.c
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "zdata.h"
-#include "compress.h"
-#include <linux/prefetch.h>
-
-#include <trace/events/erofs.h>
-
-/*
- * a compressed_pages[] placeholder used to keep the slot from
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
-
-/* how to allocate cached pages for a pcluster */
-enum z_erofs_cache_alloctype {
- DONTALLOC, /* don't allocate any cached pages */
- DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
-};
-
-/*
- * tagged pointer with 1-bit tag for all compressed pages
- * the tag is set (1) when the page has just been found with an extra page reference
- */
-typedef tagptr1_t compressed_page_t;
-
-#define tag_compressed_page_justfound(page) \
- tagptr_fold(compressed_page_t, page, 1)
-
-static struct workqueue_struct *z_erofs_workqueue __read_mostly;
-static struct kmem_cache *pcluster_cachep __read_mostly;
-
-void z_erofs_exit_zip_subsystem(void)
-{
- destroy_workqueue(z_erofs_workqueue);
- kmem_cache_destroy(pcluster_cachep);
-}
-
-static inline int init_unzip_workqueue(void)
-{
- const unsigned int onlinecpus = num_possible_cpus();
- const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
-
- /*
- * no need to spawn too many threads; limiting the number of threads
- * can minimize scheduling overhead, perhaps per-CPU threads would be better?
- */
- z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
- onlinecpus + onlinecpus / 4);
- return z_erofs_workqueue ? 0 : -ENOMEM;
-}
-
-static void init_once(void *ptr)
-{
- struct z_erofs_pcluster *pcl = ptr;
- struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
- unsigned int i;
-
- mutex_init(&cl->lock);
- cl->nr_pages = 0;
- cl->vcnt = 0;
- for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
- pcl->compressed_pages[i] = NULL;
-}
-
-static void init_always(struct z_erofs_pcluster *pcl)
-{
- struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
-
- atomic_set(&pcl->obj.refcount, 1);
-
- DBG_BUGON(cl->nr_pages);
- DBG_BUGON(cl->vcnt);
-}
-
-int __init z_erofs_init_zip_subsystem(void)
-{
- pcluster_cachep = kmem_cache_create("erofs_compress",
- Z_EROFS_WORKGROUP_SIZE, 0,
- SLAB_RECLAIM_ACCOUNT, init_once);
- if (pcluster_cachep) {
- if (!init_unzip_workqueue())
- return 0;
-
- kmem_cache_destroy(pcluster_cachep);
- }
- return -ENOMEM;
-}
-
-enum z_erofs_collectmode {
- COLLECT_SECONDARY,
- COLLECT_PRIMARY,
- /*
- * The current collection was the tail of an existing chain, and the
- * previously processed chained collections have all been decided to
- * be hooked up to it.
- * A new chain will be created for the remaining collections which are
- * not processed yet; therefore, unlike COLLECT_PRIMARY_FOLLOWED,
- * the next collection cannot reuse the whole page safely in
- * the following scenario:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (belongs to the next cl) | (belongs to the current cl) |
- * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
- */
- COLLECT_PRIMARY_HOOKED,
- COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
- /*
- * The current collection has been linked with the owned chain, and
- * could also be linked with the remaining collections, which means
- * that if the page being processed is the tail page of the collection,
- * the current collection can safely use the whole page (since
- * the previous collection is under control) for in-place I/O, as
- * illustrated below:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current cl) | (of the previous collection) |
- * | PRIMARY_FOLLOWED or | |
- * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
- *
- * [ (*) the above page can be used as inplace I/O. ]
- */
- COLLECT_PRIMARY_FOLLOWED,
-};
-
-struct z_erofs_collector {
- struct z_erofs_pagevec_ctor vector;
-
- struct z_erofs_pcluster *pcl;
- struct z_erofs_collection *cl;
- struct page **compressedpages;
- z_erofs_next_pcluster_t owned_head;
-
- enum z_erofs_collectmode mode;
-};
-
-struct z_erofs_decompress_frontend {
- struct inode *const inode;
-
- struct z_erofs_collector clt;
- struct erofs_map_blocks map;
-
- /* used for applying cache strategy on the fly */
- bool backmost;
- erofs_off_t headoffset;
-};
-
-#define COLLECTOR_INIT() { \
- .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = COLLECT_PRIMARY_FOLLOWED }
-
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
- .inode = __i, .clt = COLLECTOR_INIT(), \
- .backmost = true, }
-
-static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
-static DEFINE_MUTEX(z_pagemap_global_lock);
-
-static void preload_compressed_pages(struct z_erofs_collector *clt,
- struct address_space *mc,
- enum z_erofs_cache_alloctype type,
- struct list_head *pagepool)
-{
- const struct z_erofs_pcluster *pcl = clt->pcl;
- const unsigned int clusterpages = BIT(pcl->clusterbits);
- struct page **pages = clt->compressedpages;
- pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
- bool standalone = true;
-
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
- return;
-
- for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
- struct page *page;
- compressed_page_t t;
-
- /* the compressed page was loaded before */
- if (READ_ONCE(*pages))
- continue;
-
- page = find_get_page(mc, index);
-
- if (page) {
- t = tag_compressed_page_justfound(page);
- } else if (type == DELAYEDALLOC) {
- t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
- } else { /* DONTALLOC */
- if (standalone)
- clt->compressedpages = pages;
- standalone = false;
- continue;
- }
-
- if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
- continue;
-
- if (page)
- put_page(page);
- }
-
- if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
- clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
-}
-
-/* called by erofs_shrinker to get rid of all compressed_pages */
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
- struct address_space *const mapping = MNGD_MAPPING(sbi);
- const unsigned int clusterpages = BIT(pcl->clusterbits);
- int i;
-
- /*
- * the workgroup's refcount is now frozen at 1, therefore there is
- * no need to worry about concurrent decompression users.
- */
- for (i = 0; i < clusterpages; ++i) {
- struct page *page = pcl->compressed_pages[i];
-
- if (!page)
- continue;
-
- /* block other users from reclaiming or migrating the page */
- if (!trylock_page(page))
- return -EBUSY;
-
- if (unlikely(page->mapping != mapping))
- continue;
-
- /* barrier is implied in the following 'unlock_page' */
- WRITE_ONCE(pcl->compressed_pages[i], NULL);
- set_page_private(page, 0);
- ClearPagePrivate(page);
-
- unlock_page(page);
- put_page(page);
- }
- return 0;
-}
-
-int erofs_try_to_free_cached_page(struct address_space *mapping,
- struct page *page)
-{
- struct z_erofs_pcluster *const pcl = (void *)page_private(page);
- const unsigned int clusterpages = BIT(pcl->clusterbits);
- int ret = 0; /* 0 - busy */
-
- if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
- unsigned int i;
-
- for (i = 0; i < clusterpages; ++i) {
- if (pcl->compressed_pages[i] == page) {
- WRITE_ONCE(pcl->compressed_pages[i], NULL);
- ret = 1;
- break;
- }
- }
- erofs_workgroup_unfreeze(&pcl->obj, 1);
-
- if (ret) {
- ClearPagePrivate(page);
- put_page(page);
- }
- }
- return ret;
-}
-
-/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool try_inplace_io(struct z_erofs_collector *clt,
- struct page *page)
-{
- struct z_erofs_pcluster *const pcl = clt->pcl;
- const unsigned int clusterpages = BIT(pcl->clusterbits);
-
- while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
- if (!cmpxchg(clt->compressedpages++, NULL, page))
- return true;
- }
- return false;
-}
-
- /* callers must hold the collection lock */
-static int z_erofs_attach_page(struct z_erofs_collector *clt,
- struct page *page,
- enum z_erofs_page_type type)
-{
- int ret;
- bool occupied;
-
- /* give priority to in-place I/O */
- if (clt->mode >= COLLECT_PRIMARY &&
- type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- try_inplace_io(clt, page))
- return 0;
-
- ret = z_erofs_pagevec_enqueue(&clt->vector,
- page, type, &occupied);
- clt->cl->vcnt += (unsigned int)ret;
-
- return ret ? 0 : -EAGAIN;
-}
-
-static enum z_erofs_collectmode
-try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t *owned_head)
-{
- /* let's claim the following types of pclusters */
-retry:
- if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
- /* type 1, nil pcluster */
- if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
- *owned_head) != Z_EROFS_PCLUSTER_NIL)
- goto retry;
-
- *owned_head = &pcl->next;
- /* lucky, I am the followee :) */
- return COLLECT_PRIMARY_FOLLOWED;
- } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
- /*
- * type 2, link to the end of an existing open chain,
- * be careful that its submission itself is governed
- * by the original owned chain.
- */
- if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- *owned_head) != Z_EROFS_PCLUSTER_TAIL)
- goto retry;
- *owned_head = Z_EROFS_PCLUSTER_TAIL;
- return COLLECT_PRIMARY_HOOKED;
- }
- return COLLECT_PRIMARY; /* :( better luck next time */
-}
-
-static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
-{
- struct erofs_workgroup *grp;
- struct z_erofs_pcluster *pcl;
- struct z_erofs_collection *cl;
- unsigned int length;
- bool tag;
-
- grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
- if (!grp)
- return NULL;
-
- pcl = container_of(grp, struct z_erofs_pcluster, obj);
-
- cl = z_erofs_primarycollection(pcl);
- if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) {
- DBG_BUGON(1);
- return ERR_PTR(-EIO);
- }
-
- length = READ_ONCE(pcl->length);
- if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
- if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
- DBG_BUGON(1);
- return ERR_PTR(-EIO);
- }
- } else {
- unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
-
- if (map->m_flags & EROFS_MAP_FULL_MAPPED)
- llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
-
- while (llen > length &&
- length != cmpxchg_relaxed(&pcl->length, length, llen)) {
- cpu_relax();
- length = READ_ONCE(pcl->length);
- }
- }
- mutex_lock(&cl->lock);
- clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
- clt->pcl = pcl;
- clt->cl = cl;
- return cl;
-}
-
-static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
-{
- struct z_erofs_pcluster *pcl;
- struct z_erofs_collection *cl;
- int err;
-
- /* no available workgroup, let's allocate one */
- pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
- if (unlikely(!pcl))
- return ERR_PTR(-ENOMEM);
-
- init_always(pcl);
- pcl->obj.index = map->m_pa >> PAGE_SHIFT;
-
- pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
- (map->m_flags & EROFS_MAP_FULL_MAPPED ?
- Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
-
- if (map->m_flags & EROFS_MAP_ZIPPED)
- pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
- else
- pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
-
- pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0];
- pcl->clusterbits -= PAGE_SHIFT;
-
- /* new pclusters should be claimed as type 1, primary and followed */
- pcl->next = clt->owned_head;
- clt->mode = COLLECT_PRIMARY_FOLLOWED;
-
- cl = z_erofs_primarycollection(pcl);
- cl->pageofs = map->m_la & ~PAGE_MASK;
-
- /*
- * lock all primary followed works before they become visible to others;
- * note that mutex_trylock *never* fails for a new pcluster.
- */
- mutex_trylock(&cl->lock);
-
- err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
- if (err) {
- mutex_unlock(&cl->lock);
- kmem_cache_free(pcluster_cachep, pcl);
- return ERR_PTR(-EAGAIN);
- }
- clt->owned_head = &pcl->next;
- clt->pcl = pcl;
- clt->cl = cl;
- return cl;
-}
-
-static int z_erofs_collector_begin(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
-{
- struct z_erofs_collection *cl;
-
- DBG_BUGON(clt->cl);
-
- /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
- if (!PAGE_ALIGNED(map->m_pa)) {
- DBG_BUGON(1);
- return -EINVAL;
- }
-
-repeat:
- cl = cllookup(clt, inode, map);
- if (!cl) {
- cl = clregister(clt, inode, map);
-
- if (unlikely(cl == ERR_PTR(-EAGAIN)))
- goto repeat;
- }
-
- if (IS_ERR(cl))
- return PTR_ERR(cl);
-
- z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- cl->pagevec, cl->vcnt);
-
- clt->compressedpages = clt->pcl->compressed_pages;
- if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
- clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
- return 0;
-}
-
-/*
- * keep in mind that pclusters will not be freed until
- * an RCU grace period has elapsed.
- */
-static void z_erofs_rcu_callback(struct rcu_head *head)
-{
- struct z_erofs_collection *const cl =
- container_of(head, struct z_erofs_collection, rcu);
-
- kmem_cache_free(pcluster_cachep,
- container_of(cl, struct z_erofs_pcluster,
- primary_collection));
-}
-
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
- struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
-
- call_rcu(&cl->rcu, z_erofs_rcu_callback);
-}
-
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(cl, struct z_erofs_pcluster, primary_collection);
-
- erofs_workgroup_put(&pcl->obj);
-}
-
-static bool z_erofs_collector_end(struct z_erofs_collector *clt)
-{
- struct z_erofs_collection *cl = clt->cl;
-
- if (!cl)
- return false;
-
- z_erofs_pagevec_ctor_exit(&clt->vector, false);
- mutex_unlock(&cl->lock);
-
- /*
- * once all pending pages have been added, don't hold the
- * reference any longer if the pcluster isn't hosted by ourselves.
- */
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
- z_erofs_collection_put(cl);
-
- clt->cl = NULL;
- return true;
-}
-
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
- gfp_t gfp)
-{
- struct page *page = erofs_allocpage(pagepool, gfp, true);
-
- page->mapping = Z_EROFS_MAPPING_STAGING;
- return page;
-}
-
-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
- unsigned int cachestrategy,
- erofs_off_t la)
-{
- if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
- return false;
-
- if (fe->backmost)
- return true;
-
- return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
- la < fe->headoffset;
-}
-
-static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
- struct page *page,
- struct list_head *pagepool)
-{
- struct inode *const inode = fe->inode;
- struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
- struct erofs_map_blocks *const map = &fe->map;
- struct z_erofs_collector *const clt = &fe->clt;
- const loff_t offset = page_offset(page);
- bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
-
- enum z_erofs_cache_alloctype cache_strategy;
- enum z_erofs_page_type page_type;
- unsigned int cur, end, spiltted, index;
- int err = 0;
-
- /* register locked file pages as online pages in pack */
- z_erofs_onlinepage_init(page);
-
- spiltted = 0;
- end = PAGE_SIZE;
-repeat:
- cur = end - 1;
-
- /* lucky, within the range of the current map_blocks */
- if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen) {
- /* didn't get a valid collection previously (very rare) */
- if (!clt->cl)
- goto restart_now;
- goto hitted;
- }
-
- /* go ahead to the next map_blocks */
- debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
-
- if (z_erofs_collector_end(clt))
- fe->backmost = false;
-
- map->m_la = offset + cur;
- map->m_llen = 0;
- err = z_erofs_map_blocks_iter(inode, map, 0);
- if (unlikely(err))
- goto err_out;
-
-restart_now:
- if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
- goto hitted;
-
- err = z_erofs_collector_begin(clt, inode, map);
- if (unlikely(err))
- goto err_out;
-
- /* preload all compressed pages (maybe downgrade role if necessary) */
- if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
- cache_strategy = DELAYEDALLOC;
- else
- cache_strategy = DONTALLOC;
-
- preload_compressed_pages(clt, MNGD_MAPPING(sbi),
- cache_strategy, pagepool);
-
- tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
-hitted:
- cur = end - min_t(unsigned int, offset + end - map->m_la, end);
- if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
- zero_user_segment(page, cur, end);
- goto next_part;
- }
-
- /* let's derive page type */
- page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
- (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
-
- if (cur)
- tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
-
-retry:
- err = z_erofs_attach_page(clt, page, page_type);
- /* should allocate an additional staging page for pagevec */
- if (err == -EAGAIN) {
- struct page *const newpage =
- __stagingpage_alloc(pagepool, GFP_NOFS);
-
- err = z_erofs_attach_page(clt, newpage,
- Z_EROFS_PAGE_TYPE_EXCLUSIVE);
- if (likely(!err))
- goto retry;
- }
-
- if (unlikely(err))
- goto err_out;
-
- index = page->index - (map->m_la >> PAGE_SHIFT);
-
- z_erofs_onlinepage_fixup(page, index, true);
-
- /* bump up the number of spiltted parts of a page */
- ++spiltted;
- /* also update nr_pages */
- clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
-next_part:
- /* can be used for verification */
- map->m_llen = offset + cur - map->m_la;
-
- end = cur;
- if (end > 0)
- goto repeat;
-
-out:
- z_erofs_onlinepage_endio(page);
-
- debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
- __func__, page, spiltted, map->m_llen);
- return err;
-
- /* if some error occurred while processing this page */
-err_out:
- SetPageError(page);
- goto out;
-}
-
-static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, ptr);
- struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
- bool background = tagptr_unfold_tags(t);
-
- if (!background) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
-
- if (!atomic_add_return(bios, &io->pending_bios))
- queue_work(z_erofs_workqueue, &io->u.work);
-}
-
-static inline void z_erofs_vle_read_endio(struct bio *bio)
-{
- struct erofs_sb_info *sbi = NULL;
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- bool cachemngd = false;
-
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(!page->mapping);
-
- if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
- sbi = EROFS_SB(page->mapping->host->i_sb);
-
- if (time_to_inject(sbi, FAULT_READ_IO)) {
- erofs_show_injection_info(FAULT_READ_IO);
- err = BLK_STS_IOERR;
- }
- }
-
- /* sbi should already have been obtained if the page is managed */
- if (sbi)
- cachemngd = erofs_page_is_managed(sbi, page);
-
- if (unlikely(err))
- SetPageError(page);
- else if (cachemngd)
- SetPageUptodate(page);
-
- if (cachemngd)
- unlock_page(page);
- }
-
- z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
- bio_put(bio);
-}
-
-static int z_erofs_decompress_pcluster(struct super_block *sb,
- struct z_erofs_pcluster *pcl,
- struct list_head *pagepool)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- const unsigned int clusterpages = BIT(pcl->clusterbits);
- struct z_erofs_pagevec_ctor ctor;
- unsigned int i, outputsize, llen, nr_pages;
- struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
- struct page **pages, **compressed_pages, *page;
-
- enum z_erofs_page_type page_type;
- bool overlapped, partial;
- struct z_erofs_collection *cl;
- int err;
-
- might_sleep();
- cl = z_erofs_primarycollection(pcl);
- DBG_BUGON(!READ_ONCE(cl->nr_pages));
-
- mutex_lock(&cl->lock);
- nr_pages = cl->nr_pages;
-
- if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) {
- pages = pages_onstack;
- } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
- mutex_trylock(&z_pagemap_global_lock)) {
- pages = z_pagemap_global;
- } else {
- gfp_t gfp_flags = GFP_KERNEL;
-
- if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
- gfp_flags |= __GFP_NOFAIL;
-
- pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- gfp_flags);
-
- /* fallback to global pagemap for the lowmem scenario */
- if (unlikely(!pages)) {
- mutex_lock(&z_pagemap_global_lock);
- pages = z_pagemap_global;
- }
- }
-
- for (i = 0; i < nr_pages; ++i)
- pages[i] = NULL;
-
- z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
- cl->pagevec, 0);
-
- for (i = 0; i < cl->vcnt; ++i) {
- unsigned int pagenr;
-
- page = z_erofs_pagevec_dequeue(&ctor, &page_type);
-
- /* all pages in pagevec ought to be valid */
- DBG_BUGON(!page);
- DBG_BUGON(!page->mapping);
-
- if (z_erofs_put_stagingpage(pagepool, page))
- continue;
-
- if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
- pagenr = 0;
- else
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
-
- pages[pagenr] = page;
- }
- z_erofs_pagevec_ctor_exit(&ctor, true);
-
- overlapped = false;
- compressed_pages = pcl->compressed_pages;
-
- err = 0;
- for (i = 0; i < clusterpages; ++i) {
- unsigned int pagenr;
-
- page = compressed_pages[i];
-
- /* all compressed pages ought to be valid */
- DBG_BUGON(!page);
- DBG_BUGON(!page->mapping);
-
- if (!z_erofs_page_is_staging(page)) {
- if (erofs_page_is_managed(sbi, page)) {
- if (unlikely(!PageUptodate(page)))
- err = -EIO;
- continue;
- }
-
- /*
- * only non-head pages can be selected
- * for in-place decompression
- */
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
- pages[pagenr] = page;
-
- overlapped = true;
- }
-
- /* PG_error needs checking for in-place and staging pages */
- if (unlikely(PageError(page))) {
- DBG_BUGON(PageUptodate(page));
- err = -EIO;
- }
- }
-
- if (unlikely(err))
- goto out;
-
- llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
- if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
- outputsize = llen;
- partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
- } else {
- outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
- partial = true;
- }
-
- err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
- .sb = sb,
- .in = compressed_pages,
- .out = pages,
- .pageofs_out = cl->pageofs,
- .inputsize = PAGE_SIZE,
- .outputsize = outputsize,
- .alg = pcl->algorithmformat,
- .inplace_io = overlapped,
- .partial_decoding = partial
- }, pagepool);
-
-out:
- /* must handle all compressed pages before ending pages */
- for (i = 0; i < clusterpages; ++i) {
- page = compressed_pages[i];
-
- if (erofs_page_is_managed(sbi, page))
- continue;
-
- /* recycle all individual staging pages */
- (void)z_erofs_put_stagingpage(pagepool, page);
-
- WRITE_ONCE(compressed_pages[i], NULL);
- }
-
- for (i = 0; i < nr_pages; ++i) {
- page = pages[i];
- if (!page)
- continue;
-
- DBG_BUGON(!page->mapping);
-
- /* recycle all individual staging pages */
- if (z_erofs_put_stagingpage(pagepool, page))
- continue;
-
- if (unlikely(err < 0))
- SetPageError(page);
-
- z_erofs_onlinepage_endio(page);
- }
-
- if (pages == z_pagemap_global)
- mutex_unlock(&z_pagemap_global_lock);
- else if (unlikely(pages != pages_onstack))
- kvfree(pages);
-
- cl->nr_pages = 0;
- cl->vcnt = 0;
-
- /* all cl locks MUST be taken before the following line */
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
- /* all cl locks SHOULD be released right now */
- mutex_unlock(&cl->lock);
-
- z_erofs_collection_put(cl);
- return err;
-}
-
-static void z_erofs_vle_unzip_all(struct super_block *sb,
- struct z_erofs_unzip_io *io,
- struct list_head *pagepool)
-{
- z_erofs_next_pcluster_t owned = io->head;
-
- while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
- struct z_erofs_pcluster *pcl;
-
- /* it is impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
-
- /* it is impossible that 'owned' equals NULL */
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
-
- pcl = container_of(owned, struct z_erofs_pcluster, next);
- owned = READ_ONCE(pcl->next);
-
- z_erofs_decompress_pcluster(sb, pcl, pagepool);
- }
-}
-
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
-{
- struct z_erofs_unzip_io_sb *iosb =
- container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
- LIST_HEAD(pagepool);
-
- DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
-
- put_pages_list(&pagepool);
- kvfree(iosb);
-}
-
-static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
- unsigned int nr,
- struct list_head *pagepool,
- struct address_space *mc,
- gfp_t gfp)
-{
- /* determined at compile time to avoid too many #ifdefs */
- const bool nocache = __builtin_constant_p(mc) ? !mc : false;
- const pgoff_t index = pcl->obj.index;
- bool tocache = false;
-
- struct address_space *mapping;
- struct page *oldpage, *page;
-
- compressed_page_t t;
- int justfound;
-
-repeat:
- page = READ_ONCE(pcl->compressed_pages[nr]);
- oldpage = page;
-
- if (!page)
- goto out_allocpage;
-
- /*
- * the cached page has not been allocated yet and
- * a placeholder is out there, so prepare it now.
- */
- if (!nocache && page == PAGE_UNALLOCATED) {
- tocache = true;
- goto out_allocpage;
- }
-
- /* process the target tagged pointer */
- t = tagptr_init(compressed_page_t, page);
- justfound = tagptr_unfold_tags(t);
- page = tagptr_unfold_ptr(t);
-
- mapping = READ_ONCE(page->mapping);
-
- /*
- * if managed cache is disabled, there is no way to
- * get such a cached page.
- */
- if (nocache) {
- /* if managed cache is disabled, `justfound' can never be set */
- DBG_BUGON(justfound);
-
- /* and it should be locked, not uptodate, and not truncated */
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(!mapping);
- goto out;
- }
-
- /*
- * unmanaged (file) pages are all locked solidly,
- * therefore it is impossible for `mapping' to be NULL.
- */
- if (mapping && mapping != mc)
- /* ought to be unmanaged pages */
- goto out;
-
- lock_page(page);
-
- /* only true if page reclaim goes wrong, should never happen */
- DBG_BUGON(justfound && PagePrivate(page));
-
- /* the page is still in the managed cache */
- if (page->mapping == mc) {
- WRITE_ONCE(pcl->compressed_pages[nr], page);
-
- ClearPageError(page);
- if (!PagePrivate(page)) {
- /*
- * under the current restriction, it is also impossible
- * to be !PagePrivate(page) if the page is already
- * in compressed_pages[].
- */
- DBG_BUGON(!justfound);
-
- justfound = 0;
- set_page_private(page, (unsigned long)pcl);
- SetPagePrivate(page);
- }
-
- /* no need to submit io if it is already up-to-date */
- if (PageUptodate(page)) {
- unlock_page(page);
- page = NULL;
- }
- goto out;
- }
-
- /*
- * the managed page has been truncated; it's unsafe to
- * reuse it, so let's allocate a new cache-managed page.
- */
- DBG_BUGON(page->mapping);
- DBG_BUGON(!justfound);
-
- tocache = true;
- unlock_page(page);
- put_page(page);
-out_allocpage:
- page = __stagingpage_alloc(pagepool, gfp);
- if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
- list_add(&page->lru, pagepool);
- cpu_relax();
- goto repeat;
- }
- if (nocache || !tocache)
- goto out;
- if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
- page->mapping = Z_EROFS_MAPPING_STAGING;
- goto out;
- }
-
- set_page_private(page, (unsigned long)pcl);
- SetPagePrivate(page);
-out: /* the only exit (for tracing and debugging) */
- return page;
-}
-
-static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
- struct z_erofs_unzip_io *io,
- bool foreground)
-{
- struct z_erofs_unzip_io_sb *iosb;
-
- if (foreground) {
- /* waitqueue available for foreground io */
- DBG_BUGON(!io);
-
- init_waitqueue_head(&io->u.wait);
- atomic_set(&io->pending_bios, 0);
- goto out;
- }
-
- iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
- DBG_BUGON(!iosb);
-
- /* initialize fields in the allocated descriptor */
- io = &iosb->io;
- iosb->sb = sb;
- INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
-out:
- io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
- return io;
-}
-
-/* define decompression jobqueue types */
-enum {
- JQ_BYPASS,
- JQ_SUBMIT,
- NR_JOBQUEUES,
-};
-
-static void *jobqueueset_init(struct super_block *sb,
- z_erofs_next_pcluster_t qtail[],
- struct z_erofs_unzip_io *q[],
- struct z_erofs_unzip_io *fgq,
- bool forcefg)
-{
- /*
- * if managed cache is enabled, a bypass jobqueue is needed:
- * there is no need to read from the device for the pclusters in this queue.
- */
- q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
- qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-
- q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
- qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
-
- return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
-}
-
-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t qtail[],
- z_erofs_next_pcluster_t owned_head)
-{
- z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
- z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
-
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
- owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
-
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
- WRITE_ONCE(*submit_qtail, owned_head);
- WRITE_ONCE(*bypass_qtail, &pcl->next);
-
- qtail[JQ_BYPASS] = &pcl->next;
-}
-
-static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
- unsigned int nr_bios,
- bool force_fg)
-{
- /*
- * although background decompression is preferred, nothing is pending for submission;
- * don't kick the workqueue for decompression, just drop it directly instead.
- */
- if (force_fg || nr_bios)
- return false;
-
- kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
- return true;
-}
-
-static bool z_erofs_vle_submit_all(struct super_block *sb,
- z_erofs_next_pcluster_t owned_head,
- struct list_head *pagepool,
- struct z_erofs_unzip_io *fgq,
- bool force_fg)
-{
- struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
- z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
- struct z_erofs_unzip_io *q[NR_JOBQUEUES];
- struct bio *bio;
- void *bi_private;
- /* since bio will be NULL, no need to initialize last_index */
- pgoff_t uninitialized_var(last_index);
- bool force_submit = false;
- unsigned int nr_bios;
-
- if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL))
- return false;
-
- force_submit = false;
- bio = NULL;
- nr_bios = 0;
- bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
-
- /* by default, all need io submission */
- q[JQ_SUBMIT]->head = owned_head;
-
- do {
- struct z_erofs_pcluster *pcl;
- unsigned int clusterpages;
- pgoff_t first_index;
- struct page *page;
- unsigned int i = 0, bypass = 0;
- int err;
-
- /* 'owned_head' can never equal any of the following */
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
-
- pcl = container_of(owned_head, struct z_erofs_pcluster, next);
-
- clusterpages = BIT(pcl->clusterbits);
-
- /* close the main owned chain at first */
- owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
- first_index = pcl->obj.index;
- force_submit |= (first_index != last_index + 1);
-
-repeat:
- page = pickup_page_for_submission(pcl, i, pagepool,
- MNGD_MAPPING(sbi),
- GFP_NOFS);
- if (!page) {
- force_submit = true;
- ++bypass;
- goto skippage;
- }
-
- if (bio && force_submit) {
-submit_bio_retry:
- __submit_bio(bio, REQ_OP_READ, 0);
- bio = NULL;
- }
-
- if (!bio) {
- bio = erofs_grab_bio(sb, first_index + i,
- BIO_MAX_PAGES, bi_private,
- z_erofs_vle_read_endio, true);
- ++nr_bios;
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
-
- force_submit = false;
- last_index = first_index + i;
-skippage:
- if (++i < clusterpages)
- goto repeat;
-
- if (bypass < clusterpages)
- qtail[JQ_SUBMIT] = &pcl->next;
- else
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
- } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
-
- if (bio)
- __submit_bio(bio, REQ_OP_READ, 0);
-
- if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
- return true;
-
- z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
- return true;
-}
-
-static void z_erofs_submit_and_unzip(struct super_block *sb,
- struct z_erofs_collector *clt,
- struct list_head *pagepool,
- bool force_fg)
-{
- struct z_erofs_unzip_io io[NR_JOBQUEUES];
-
- if (!z_erofs_vle_submit_all(sb, clt->owned_head,
- pagepool, io, force_fg))
- return;
-
- /* decompress no-I/O pclusters immediately */
- z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
-
- if (!force_fg)
- return;
-
- /* wait until all bios are completed */
- wait_event(io[JQ_SUBMIT].u.wait,
- !atomic_read(&io[JQ_SUBMIT].pending_bios));
-
- /* let's do synchronous decompression */
- z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
-}
-
-static int z_erofs_vle_normalaccess_readpage(struct file *file,
- struct page *page)
-{
- struct inode *const inode = page->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- int err;
- LIST_HEAD(pagepool);
-
- trace_erofs_readpage(page, false);
-
- f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
-
- err = z_erofs_do_read_page(&f, page, &pagepool);
- (void)z_erofs_collector_end(&f.clt);
-
- if (err) {
- errln("%s, failed to read, err [%d]", __func__, err);
- goto out;
- }
-
- z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
-out:
- if (f.map.mpage)
- put_page(f.map.mpage);
-
- /* clean up the remaining free pages */
- put_pages_list(&pagepool);
- return 0;
-}
-
-static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
- unsigned int nr)
-{
- return nr <= sbi->max_sync_decompress_pages;
-}
-
-static int z_erofs_vle_normalaccess_readpages(struct file *filp,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned int nr_pages)
-{
- struct inode *const inode = mapping->host;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
- bool sync = should_decompress_synchronously(sbi, nr_pages);
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
- struct page *head = NULL;
- LIST_HEAD(pagepool);
-
- trace_erofs_readpages(mapping->host, lru_to_page(pages),
- nr_pages, false);
-
- f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
-
- for (; nr_pages; --nr_pages) {
- struct page *page = lru_to_page(pages);
-
- prefetchw(&page->flags);
- list_del(&page->lru);
-
- /*
- * A pure asynchronous readahead is indicated if
- * a PG_readahead marked page is hit first.
- * Let's also do asynchronous decompression for this case.
- */
- sync &= !(PageReadahead(page) && !head);
-
- if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
- list_add(&page->lru, &pagepool);
- continue;
- }
-
- set_page_private(page, (unsigned long)head);
- head = page;
- }
-
- while (head) {
- struct page *page = head;
- int err;
-
- /* traversal in reverse order */
- head = (void *)page_private(page);
-
- err = z_erofs_do_read_page(&f, page, &pagepool);
- if (err) {
- struct erofs_vnode *vi = EROFS_V(inode);
-
- errln("%s, readahead error at page %lu of nid %llu",
- __func__, page->index, vi->nid);
- }
- put_page(page);
- }
-
- (void)z_erofs_collector_end(&f.clt);
-
- z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
-
- if (f.map.mpage)
- put_page(f.map.mpage);
-
- /* clean up the remaining free pages */
- put_pages_list(&pagepool);
- return 0;
-}
-
-const struct address_space_operations z_erofs_vle_normalaccess_aops = {
- .readpage = z_erofs_vle_normalaccess_readpage,
- .readpages = z_erofs_vle_normalaccess_readpages,
-};
-
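The compressed_page_t values handled above are tagged pointers: a 1-bit flag folded into the low bit of a page pointer (page structures are aligned, so that bit is otherwise always zero), with the tag marking a page that was just found in the managed cache. A minimal standalone illustration of the fold/unfold idea, kept independent of the kernel's tagptr helpers:

#include <stdint.h>

/* fold a 1-bit tag into an aligned pointer, as tagptr_fold() does above */
static inline void *tag_fold(void *ptr, unsigned int tag)
{
        return (void *)((uintptr_t)ptr | (tag & 1u));
}

static inline void *tag_unfold_ptr(void *t)             /* drop the tag bit */
{
        return (void *)((uintptr_t)t & ~(uintptr_t)1);
}

static inline unsigned int tag_unfold_tag(void *t)      /* read the tag bit */
{
        return (unsigned int)((uintptr_t)t & 1u);
}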
diff --git a/drivers/staging/erofs/zdata.h b/drivers/staging/erofs/zdata.h
deleted file mode 100644
index e11fe1959ca2..000000000000
--- a/drivers/staging/erofs/zdata.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/staging/erofs/zdata.h
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_ZDATA_H
-#define __EROFS_FS_ZDATA_H
-
-#include "internal.h"
-#include "zpvec.h"
-
-#define Z_EROFS_NR_INLINE_PAGEVECS 3
-
-/*
- * Structure fields follow one of the following exclusion rules.
- *
- * I: Modifiable by initialization/destruction paths and read-only
- * for everyone else;
- *
- * L: Field should be protected by pageset lock;
- *
- * A: Field should be accessed / updated in atomic for parallelized code.
- */
-struct z_erofs_collection {
- struct mutex lock;
-
- /* I: page offset of start position of decompression */
- unsigned short pageofs;
-
- /* L: maximum relative page index in pagevec[] */
- unsigned short nr_pages;
-
- /* L: total number of pages in pagevec[] */
- unsigned int vcnt;
-
- union {
- /* L: inline a certain number of pagevecs for bootstrap */
- erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
-
- /* I: can be used to free the pcluster by RCU. */
- struct rcu_head rcu;
- };
-};
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
-
-/*
- * let's leave a type here in case another tagged
- * pointer is introduced later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
-struct z_erofs_pcluster {
- struct erofs_workgroup obj;
- struct z_erofs_collection primary_collection;
-
- /* A: point to next chained pcluster or TAILs */
- z_erofs_next_pcluster_t next;
-
- /* A: compressed pages (including multi-usage pages) */
- struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
-
- /* A: lower limit of decompressed length and whether it is the full length */
- unsigned int length;
-
- /* I: compression algorithm format */
- unsigned char algorithmformat;
- /* I: bit shift of physical cluster size */
- unsigned char clusterbits;
-};
-
-#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
-
-/* let's avoid the valid 32-bit kernel addresses */
-
- /* the chained workgroup hasn't submitted io yet (still open) */
-#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
-/* the chained workgroup has already submitted io */
-#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
-
-#define Z_EROFS_PCLUSTER_NIL (NULL)
-
-#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
-
-struct z_erofs_unzip_io {
- atomic_t pending_bios;
- z_erofs_next_pcluster_t head;
-
- union {
- wait_queue_head_t wait;
- struct work_struct work;
- } u;
-};
-
-struct z_erofs_unzip_io_sb {
- struct z_erofs_unzip_io io;
- struct super_block *sb;
-};
-
-#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page)
-{
- return page->mapping == MNGD_MAPPING(sbi);
-}
-
-#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
-/*
- * waiters (aka. ongoing_packs): # to unlock the page
- * sub-index: 0 - for partial page, >= 1 full page sub-index
- */
-typedef atomic_t z_erofs_onlinepage_t;
-
-/* type punning */
-union z_erofs_onlinepage_converter {
- z_erofs_onlinepage_t *o;
- unsigned long *v;
-};
-
-static inline unsigned int z_erofs_onlinepage_index(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-}
-
-static inline void z_erofs_onlinepage_init(struct page *page)
-{
- union {
- z_erofs_onlinepage_t o;
- unsigned long v;
- /* keep from being unlocked in advance */
- } u = { .o = ATOMIC_INIT(1) };
-
- set_page_private(page, u.v);
- smp_wmb();
- SetPagePrivate(page);
-}
-
-static inline void z_erofs_onlinepage_fixup(struct page *page,
- uintptr_t index, bool down)
-{
- unsigned long *p, o, v, id;
-repeat:
- p = &page_private(page);
- o = READ_ONCE(*p);
-
- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (id) {
- if (!index)
- return;
-
- DBG_BUGON(id != index);
- }
-
- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
- if (cmpxchg(p, o, v) != o)
- goto repeat;
-}
-
-static inline void z_erofs_onlinepage_endio(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
- unsigned int v;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- v = atomic_dec_return(u.o);
- if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
- ClearPagePrivate(page);
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- }
- debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
-}
-
-#define Z_EROFS_VMAP_ONSTACK_PAGES \
- min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
-#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
-
-#endif
-
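
The onlinepage helpers removed above keep per-page decompression state in page_private(): a 2-bit waiter count in the low bits (Z_EROFS_ONLINEPAGE_COUNT_BITS) and a sub-index shifted above it. A minimal stand-alone sketch of that encoding (user-space C, illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define COUNT_BITS   2				/* Z_EROFS_ONLINEPAGE_COUNT_BITS */
#define COUNT_MASK   ((1u << COUNT_BITS) - 1)
#define INDEX_SHIFT  COUNT_BITS			/* Z_EROFS_ONLINEPAGE_INDEX_SHIFT */

int main(void)
{
	unsigned long v = 1;	/* init: one waiter, index 0 -> page stays locked */

	/* fixup: record sub-index 3 and add another waiter */
	v = (3UL << INDEX_SHIFT) | ((v & COUNT_MASK) + 1);
	assert((v >> INDEX_SHIFT) == 3);
	assert((v & COUNT_MASK) == 2);

	/* endio: each completion drops one waiter; unlock at zero */
	while (v & COUNT_MASK)
		v--;
	printf("waiters drained, the page would be unlocked now\n");
	return 0;
}

z_erofs_onlinepage_endio() above is the kernel counterpart of that final loop: once the count bits reach zero it marks the page uptodate (unless PageError) and unlocks it.
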
diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c
deleted file mode 100644
index b61b9b5950ac..000000000000
--- a/drivers/staging/erofs/zmap.c
+++ /dev/null
@@ -1,463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/staging/erofs/zmap.c
- *
- * Copyright (C) 2018-2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "internal.h"
-#include <asm/unaligned.h>
-#include <trace/events/erofs.h>
-
-int z_erofs_fill_inode(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
-
- if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
- vi->z_advise = 0;
- vi->z_algorithmtype[0] = 0;
- vi->z_algorithmtype[1] = 0;
- vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
- vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
- vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
- set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
- }
-
- inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
- return 0;
-}
-
-static int fill_inode_lazy(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- int err;
- erofs_off_t pos;
- struct page *page;
- void *kaddr;
- struct z_erofs_map_header *h;
-
- if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
- return 0;
-
- if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
- return -ERESTARTSYS;
-
- err = 0;
- if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
- goto out_unlock;
-
- DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
-
- pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
- vi->xattr_isize, 8);
- page = erofs_get_meta_page(sb, erofs_blknr(pos), false);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out_unlock;
- }
-
- kaddr = kmap_atomic(page);
-
- h = kaddr + erofs_blkoff(pos);
- vi->z_advise = le16_to_cpu(h->h_advise);
- vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
- vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
-
- if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
- errln("unknown compression format %u for nid %llu, please upgrade kernel",
- vi->z_algorithmtype[0], vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_done;
- }
-
- vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
- vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
- ((h->h_clusterbits >> 3) & 3);
-
- if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
- errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
- vi->z_physical_clusterbits[0], vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_done;
- }
-
- vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
- ((h->h_clusterbits >> 5) & 7);
-unmap_done:
- kunmap_atomic(kaddr);
- unlock_page(page);
- put_page(page);
-
- set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
-out_unlock:
- clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
- return err;
-}
-
-struct z_erofs_maprecorder {
- struct inode *inode;
- struct erofs_map_blocks *map;
- void *kaddr;
-
- unsigned long lcn;
- /* compression extent information gathered */
- u8 type;
- u16 clusterofs;
- u16 delta[2];
- erofs_blk_t pblk;
-};
-
-static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
- erofs_blk_t eblk)
-{
- struct super_block *const sb = m->inode->i_sb;
- struct erofs_map_blocks *const map = m->map;
- struct page *mpage = map->mpage;
-
- if (mpage) {
- if (mpage->index == eblk) {
- if (!m->kaddr)
- m->kaddr = kmap_atomic(mpage);
- return 0;
- }
-
- if (m->kaddr) {
- kunmap_atomic(m->kaddr);
- m->kaddr = NULL;
- }
- put_page(mpage);
- }
-
- mpage = erofs_get_meta_page(sb, eblk, false);
- if (IS_ERR(mpage)) {
- map->mpage = NULL;
- return PTR_ERR(mpage);
- }
- m->kaddr = kmap_atomic(mpage);
- unlock_page(mpage);
- map->mpage = mpage;
- return 0;
-}
-
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
-{
- struct inode *const inode = m->inode;
- struct erofs_vnode *const vi = EROFS_V(inode);
- const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
- const erofs_off_t pos =
- Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
- vi->xattr_isize) +
- lcn * sizeof(struct z_erofs_vle_decompressed_index);
- struct z_erofs_vle_decompressed_index *di;
- unsigned int advise, type;
- int err;
-
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
- if (err)
- return err;
-
- m->lcn = lcn;
- di = m->kaddr + erofs_blkoff(pos);
-
- advise = le16_to_cpu(di->di_advise);
- type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
- ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
- switch (type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- m->clusterofs = 1 << vi->z_logical_clusterbits;
- m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
- m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
- break;
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- m->clusterofs = le16_to_cpu(di->di_clusterofs);
- m->pblk = le32_to_cpu(di->di_u.blkaddr);
- break;
- default:
- DBG_BUGON(1);
- return -EOPNOTSUPP;
- }
- m->type = type;
- return 0;
-}
-
-static unsigned int decode_compactedbits(unsigned int lobits,
- unsigned int lomask,
- u8 *in, unsigned int pos, u8 *type)
-{
- const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
- const unsigned int lo = v & lomask;
-
- *type = (v >> lobits) & 3;
- return lo;
-}
-
-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
- unsigned int amortizedshift,
- unsigned int eofs)
-{
- struct erofs_vnode *const vi = EROFS_V(m->inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const unsigned int lomask = (1 << lclusterbits) - 1;
- unsigned int vcnt, base, lo, encodebits, nblk;
- int i;
- u8 *in, type;
-
- if (1 << amortizedshift == 4)
- vcnt = 2;
- else if (1 << amortizedshift == 2 && lclusterbits == 12)
- vcnt = 16;
- else
- return -EOPNOTSUPP;
-
- encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
- base = round_down(eofs, vcnt << amortizedshift);
- in = m->kaddr + base;
-
- i = (eofs - base) >> amortizedshift;
-
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- m->type = type;
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
- m->clusterofs = 1 << lclusterbits;
- if (i + 1 != vcnt) {
- m->delta[0] = lo;
- return 0;
- }
-		/*
-		 * the last lcluster in the pack is special: its lo stores
-		 * delta[1] rather than delta[0], so get delta[0] from the
-		 * previous lcluster instead.
-		 */
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * (i - 1), &type);
- if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
- lo = 0;
- m->delta[0] = lo + 1;
- return 0;
- }
- m->clusterofs = lo;
- m->delta[0] = 0;
-	/* figure out blkaddr (pblk) for HEAD lclusters */
- nblk = 1;
- while (i > 0) {
- --i;
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
- i -= lo;
-
- if (i >= 0)
- ++nblk;
- }
- in += (vcnt << amortizedshift) - sizeof(__le32);
- m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
- return 0;
-}
-
-static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
-{
- struct inode *const inode = m->inode;
- struct erofs_vnode *const vi = EROFS_V(inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
- vi->inode_isize + vi->xattr_isize, 8) +
- sizeof(struct z_erofs_map_header);
- const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
- unsigned int compacted_4b_initial, compacted_2b;
- unsigned int amortizedshift;
- erofs_off_t pos;
- int err;
-
- if (lclusterbits != 12)
- return -EOPNOTSUPP;
-
- if (lcn >= totalidx)
- return -EINVAL;
-
- m->lcn = lcn;
- /* used to align to 32-byte (compacted_2b) alignment */
- compacted_4b_initial = (32 - ebase % 32) / 4;
- if (compacted_4b_initial == 32 / 4)
- compacted_4b_initial = 0;
-
- if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
- else
- compacted_2b = 0;
-
- pos = ebase;
- if (lcn < compacted_4b_initial) {
- amortizedshift = 2;
- goto out;
- }
- pos += compacted_4b_initial * 4;
- lcn -= compacted_4b_initial;
-
- if (lcn < compacted_2b) {
- amortizedshift = 1;
- goto out;
- }
- pos += compacted_2b * 2;
- lcn -= compacted_2b;
- amortizedshift = 2;
-out:
- pos += lcn * (1 << amortizedshift);
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
- if (err)
- return err;
- return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
-}
-
-static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn)
-{
- const unsigned int datamode = EROFS_V(m->inode)->datamode;
-
- if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
- return vle_legacy_load_cluster_from_disk(m, lcn);
-
- if (datamode == EROFS_INODE_FLAT_COMPRESSION)
- return compacted_load_cluster_from_disk(m, lcn);
-
- return -EINVAL;
-}
-
-static int vle_extent_lookback(struct z_erofs_maprecorder *m,
- unsigned int lookback_distance)
-{
- struct erofs_vnode *const vi = EROFS_V(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn = m->lcn;
- int err;
-
- if (lcn < lookback_distance) {
- errln("bogus lookback distance @ nid %llu", vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- /* load extent head logical cluster if needed */
- lcn -= lookback_distance;
- err = vle_load_cluster_from_disk(m, lcn);
- if (err)
- return err;
-
- switch (m->type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- return vle_extent_lookback(m, m->delta[0]);
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- map->m_la = (lcn << lclusterbits) | m->clusterofs;
- break;
- default:
- errln("unknown type %u at lcn %lu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct z_erofs_maprecorder m = {
- .inode = inode,
- .map = map,
- };
- int err = 0;
- unsigned int lclusterbits, endoff;
- unsigned long long ofs, end;
-
- trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
-
- /* when trying to read beyond EOF, leave it unmapped */
- if (unlikely(map->m_la >= inode->i_size)) {
- map->m_llen = map->m_la + 1 - inode->i_size;
- map->m_la = inode->i_size;
- map->m_flags = 0;
- goto out;
- }
-
- err = fill_inode_lazy(inode);
- if (err)
- goto out;
-
- lclusterbits = vi->z_logical_clusterbits;
- ofs = map->m_la;
- m.lcn = ofs >> lclusterbits;
- endoff = ofs & ((1 << lclusterbits) - 1);
-
- err = vle_load_cluster_from_disk(&m, m.lcn);
- if (err)
- goto unmap_out;
-
- map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */
- end = (m.lcn + 1ULL) << lclusterbits;
-
- switch (m.type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- if (endoff >= m.clusterofs)
- map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- if (endoff >= m.clusterofs) {
- map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
- break;
- }
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (unlikely(!m.lcn)) {
- errln("invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
- }
- end = (m.lcn << lclusterbits) | m.clusterofs;
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
- m.delta[0] = 1;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
-		/* get the corresponding first chunk */
- err = vle_extent_lookback(&m, m.delta[0]);
- if (unlikely(err))
- goto unmap_out;
- break;
- default:
- errln("unknown type %u at offset %llu of nid %llu",
- m.type, ofs, vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_out;
- }
-
- map->m_llen = end - map->m_la;
- map->m_plen = 1 << lclusterbits;
- map->m_pa = blknr_to_addr(m.pblk);
- map->m_flags |= EROFS_MAP_MAPPED;
-
-unmap_out:
- if (m.kaddr)
- kunmap_atomic(m.kaddr);
-
-out:
- debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
- __func__, map->m_la, map->m_pa,
- map->m_llen, map->m_plen, map->m_flags);
-
- trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
-
- /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
- DBG_BUGON(err < 0 && err != -ENOMEM);
- return err;
-}
-
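
The two index layouts accepted by unpack_compacted_index() above are easier to see with the numbers written out; this sketch just replays the deleted code's encodebits formula (illustrative only):

#include <stdio.h>

/* encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt */
static unsigned int encodebits(unsigned int vcnt, unsigned int amortizedshift)
{
	return ((vcnt << amortizedshift) - 4) * 8 / vcnt;
}

int main(void)
{
	/* compacted 4B: 2 lclusters share an 8-byte pack */
	printf("4B layout: %u bits per lcluster\n", encodebits(2, 2));
	/* compacted 2B: 16 lclusters share a 32-byte pack */
	printf("2B layout: %u bits per lcluster\n", encodebits(16, 1));
	return 0;
}

Each pack reserves a trailing __le32 for the base block address, and the remaining bits are split evenly across its vcnt lclusters (16 bits each for the 4B layout, 14 bits each for the 2B layout).
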
diff --git a/drivers/staging/erofs/zpvec.h b/drivers/staging/erofs/zpvec.h
deleted file mode 100644
index 9798f5627786..000000000000
--- a/drivers/staging/erofs/zpvec.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/staging/erofs/zpvec.h
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_ZPVEC_H
-#define __EROFS_FS_ZPVEC_H
-
-#include "tagptr.h"
-
-/* page type in pagevec for decompress subsystem */
-enum z_erofs_page_type {
- /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
- Z_EROFS_PAGE_TYPE_EXCLUSIVE,
-
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
-
- Z_EROFS_VLE_PAGE_TYPE_HEAD,
- Z_EROFS_VLE_PAGE_TYPE_MAX
-};
-
-extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
- __bad_page_type_exclusive(void);
-
-/* pagevec tagged pointer */
-typedef tagptr2_t erofs_vtptr_t;
-
-/* pagevec collector */
-struct z_erofs_pagevec_ctor {
- struct page *curr, *next;
- erofs_vtptr_t *pages;
-
- unsigned int nr, index;
-};
-
-static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- if (!ctor->curr)
- return;
-
- if (atomic)
- kunmap_atomic(ctor->pages);
- else
- kunmap(ctor->curr);
-}
-
-static inline struct page *
-z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
- unsigned int nr)
-{
- unsigned int index;
-
- /* keep away from occupied pages */
- if (ctor->next)
- return ctor->next;
-
- for (index = 0; index < nr; ++index) {
- const erofs_vtptr_t t = ctor->pages[index];
- const unsigned int tags = tagptr_unfold_tags(t);
-
- if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
- return tagptr_unfold_ptr(t);
- }
- DBG_BUGON(nr >= ctor->nr);
- return NULL;
-}
-
-static inline void
-z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
-
- z_erofs_pagevec_ctor_exit(ctor, atomic);
-
- ctor->curr = next;
- ctor->next = NULL;
- ctor->pages = atomic ?
- kmap_atomic(ctor->curr) : kmap(ctor->curr);
-
- ctor->nr = PAGE_SIZE / sizeof(struct page *);
- ctor->index = 0;
-}
-
-static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
- unsigned int nr,
- erofs_vtptr_t *pages,
- unsigned int i)
-{
- ctor->nr = nr;
- ctor->curr = ctor->next = NULL;
- ctor->pages = pages;
-
- if (i >= nr) {
- i -= nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- while (i > ctor->nr) {
- i -= ctor->nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- }
- }
- ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
- ctor->index = i;
-}
-
-static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
- struct page *page,
- enum z_erofs_page_type type,
- bool *occupied)
-{
- *occupied = false;
- if (unlikely(!ctor->next && type))
- if (ctor->index + 1 == ctor->nr)
- return false;
-
- if (unlikely(ctor->index >= ctor->nr))
- z_erofs_pagevec_ctor_pagedown(ctor, false);
-
- /* exclusive page type must be 0 */
- if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
- __bad_page_type_exclusive();
-
-	/* note that collector->next never equals 1 or 2 */
- if (type == (uintptr_t)ctor->next) {
- ctor->next = page;
- *occupied = true;
- }
- ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
- return true;
-}
-
-static inline struct page *
-z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
- enum z_erofs_page_type *type)
-{
- erofs_vtptr_t t;
-
- if (unlikely(ctor->index >= ctor->nr)) {
- DBG_BUGON(!ctor->next);
- z_erofs_pagevec_ctor_pagedown(ctor, true);
- }
-
- t = ctor->pages[ctor->index];
-
- *type = tagptr_unfold_tags(t);
-
-	/* note that collector->next never equals 1 or 2 */
- if (*type == (uintptr_t)ctor->next)
- ctor->next = tagptr_unfold_ptr(t);
-
- ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
- return tagptr_unfold_ptr(t);
-}
-#endif
-
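
The erofs_vtptr_t entries used by the pagevec collector above are tagged pointers: a page pointer folded together with a 2-bit page type in the pointer's low alignment bits. A rough stand-alone illustration of the idea (this is not the kernel's tagptr.h implementation):

#include <assert.h>
#include <stdint.h>

#define TAG_BITS 2				/* tagptr2_t carries two tag bits */
#define TAG_MASK ((uintptr_t)((1 << TAG_BITS) - 1))

static uintptr_t tag_fold(void *ptr, unsigned int tag)
{
	assert(((uintptr_t)ptr & TAG_MASK) == 0);	/* pointer must be aligned */
	return (uintptr_t)ptr | (tag & TAG_MASK);
}

static void *tag_unfold_ptr(uintptr_t t)	 { return (void *)(t & ~TAG_MASK); }
static unsigned int tag_unfold_tags(uintptr_t t) { return t & TAG_MASK; }

int main(void)
{
	static int object;			/* stand-in for a struct page */
	uintptr_t t = tag_fold(&object, 1);	/* e.g. the TAIL_SHARED type */

	assert(tag_unfold_ptr(t) == (void *)&object);
	assert(tag_unfold_tags(t) == 1);
	return 0;
}
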
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
new file mode 100644
index 000000000000..290dbfc7ace1
--- /dev/null
+++ b/drivers/staging/exfat/Kconfig
@@ -0,0 +1,49 @@
+config EXFAT_FS
+ tristate "exFAT fs support"
+ depends on BLOCK
+ select NLS
+ help
+ This adds support for the exFAT file system.
+
+config EXFAT_DONT_MOUNT_VFAT
+	bool "Prohibit mounting of fat/vfat filesystems by exFAT"
+ depends on EXFAT_FS
+ default y
+ help
+ By default, the exFAT driver will only mount exFAT filesystems, and refuse
+ to mount fat/vfat filesystems. Set this to 'n' to allow the exFAT driver
+ to mount these filesystems.
+
+config EXFAT_DISCARD
+ bool "enable discard support"
+ depends on EXFAT_FS
+ default y
+
+config EXFAT_DELAYED_SYNC
+ bool "enable delayed sync"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_KERNEL_DEBUG
+ bool "enable kernel debug features via ioctl"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_DEBUG_MSG
+ bool "print debug messages"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_DEFAULT_CODEPAGE
+ int "Default codepage for exFAT"
+ default 437
+ depends on EXFAT_FS
+ help
+ This option should be set to the codepage of your exFAT filesystems.
+
+config EXFAT_DEFAULT_IOCHARSET
+ string "Default iocharset for exFAT"
+ default "utf8"
+ depends on EXFAT_FS
+ help
+ Set this to the default input/output character set you'd like exFAT to use.
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
new file mode 100644
index 000000000000..84944dfbae28
--- /dev/null
+++ b/drivers/staging/exfat/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_EXFAT_FS) += exfat.o
+
+exfat-y := exfat_core.o \
+ exfat_super.o \
+ exfat_blkdev.o \
+ exfat_cache.o \
+ exfat_nls.o \
+ exfat_upcase.o
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
new file mode 100644
index 000000000000..a3eb282f9efc
--- /dev/null
+++ b/drivers/staging/exfat/TODO
@@ -0,0 +1,12 @@
+exfat_core.c - ffsReadFile - the goto err_out seems to leak a brelse().
+Same for ffsWriteFile.
+
+exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell.
+There's only one place that calls it with a non-zero argument.
+
+ffsTruncateFile - if (old_size <= new_size) {
+That doesn't look right. How did it ever work? Are they relying on lazy
+block allocation when actual writes happen? If nothing else, it never
+does the 'fid->size = new_size' or the inode update....
+
+ffsSetAttr() is just dangling in the breeze, not wired up at all...
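
To make the first TODO item concrete: the concern is an error path that jumps past the release of a buffer head taken earlier in the function. A hypothetical user-space analogue (names invented here; get_block()/put_block() merely stand in for bread()/brelse()):

#include <stdlib.h>

static void *get_block(void)	{ return malloc(512); }	/* stand-in for bread() */
static void put_block(void *b)	{ free(b); }		/* stand-in for brelse() */

static int read_file(int fail_later)
{
	void *bh = get_block();
	int err = 0;

	if (!bh)
		return -1;

	if (fail_later) {
		err = -1;
		goto err_out;	/* jumps past put_block() below, so bh leaks */
	}

	put_block(bh);
	return 0;

err_out:
	/* the fix is a put_block(bh) here (or before the goto) */
	return err;
}

int main(void)
{
	read_file(1);
	return 0;
}
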
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
new file mode 100644
index 000000000000..0aa14dea4e09
--- /dev/null
+++ b/drivers/staging/exfat/exfat.h
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _EXFAT_H
+#define _EXFAT_H
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+	/* For debugging purposes */
+ /* IOCTL code 'f' used by
+ * - file systems typically #0~0x1F
+ * - embedded terminal devices #128~
+ * - exts for debugging purpose #99
+	 * numbers 100 and 101 are available now but may conflict
+ */
+#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long)
+#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long)
+
+#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
+#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+#ifdef CONFIG_EXFAT_DEBUG_MSG
+#define DEBUG 1
+#else
+#undef DEBUG
+#endif
+
+#define DENTRY_SIZE 32 /* dir entry size */
+#define DENTRY_SIZE_BITS 5
+
+/* PBR entries */
+#define PBR_SIGNATURE 0xAA55
+#define EXT_SIGNATURE 0xAA550000
+#define VOL_LABEL "NO NAME " /* size should be 11 */
+#define OEM_NAME "MSWIN4.1" /* size should be 8 */
+#define STR_FAT12 "FAT12 " /* size should be 8 */
+#define STR_FAT16 "FAT16 " /* size should be 8 */
+#define STR_FAT32 "FAT32 " /* size should be 8 */
+#define STR_EXFAT "EXFAT " /* size should be 8 */
+#define VOL_CLEAN 0x0000
+#define VOL_DIRTY 0x0002
+
+/* max number of clusters */
+#define FAT12_THRESHOLD 4087 /* 2^12 - 1 + 2 (clu 0 & 1) */
+#define FAT16_THRESHOLD 65527 /* 2^16 - 1 + 2 */
+#define FAT32_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
+#define EXFAT_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
+
+/* file types */
+#define TYPE_UNUSED 0x0000
+#define TYPE_DELETED 0x0001
+#define TYPE_INVALID 0x0002
+#define TYPE_CRITICAL_PRI 0x0100
+#define TYPE_BITMAP 0x0101
+#define TYPE_UPCASE 0x0102
+#define TYPE_VOLUME 0x0103
+#define TYPE_DIR 0x0104
+#define TYPE_FILE 0x011F
+#define TYPE_SYMLINK 0x015F
+#define TYPE_CRITICAL_SEC 0x0200
+#define TYPE_STREAM 0x0201
+#define TYPE_EXTEND 0x0202
+#define TYPE_ACL 0x0203
+#define TYPE_BENIGN_PRI 0x0400
+#define TYPE_GUID 0x0401
+#define TYPE_PADDING 0x0402
+#define TYPE_ACLTAB 0x0403
+#define TYPE_BENIGN_SEC 0x0800
+#define TYPE_ALL 0x0FFF
+
+/* time modes */
+#define TM_CREATE 0
+#define TM_MODIFY 1
+#define TM_ACCESS 2
+
+/* checksum types */
+#define CS_DIR_ENTRY 0
+#define CS_PBR_SECTOR 1
+#define CS_DEFAULT 2
+
+#define CLUSTER_16(x) ((u16)(x))
+#define CLUSTER_32(x) ((u32)(x))
+
+#define START_SECTOR(x) \
+ ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \
+ p_fs->data_start_sector)
+
+#define IS_LAST_SECTOR_IN_CLUSTER(sec) \
+ ((((sec) - p_fs->data_start_sector + 1) & \
+ ((1 << p_fs->sectors_per_clu_bits) - 1)) == 0)
+
+#define GET_CLUSTER_FROM_SECTOR(sec) \
+ ((u32)((((sec) - p_fs->data_start_sector) >> \
+ p_fs->sectors_per_clu_bits) + 2))
+
+#define GET16(p_src) \
+ (((u16)(p_src)[0]) | (((u16)(p_src)[1]) << 8))
+#define GET32(p_src) \
+ (((u32)(p_src)[0]) | (((u32)(p_src)[1]) << 8) | \
+ (((u32)(p_src)[2]) << 16) | (((u32)(p_src)[3]) << 24))
+#define GET64(p_src) \
+ (((u64)(p_src)[0]) | (((u64)(p_src)[1]) << 8) | \
+ (((u64)(p_src)[2]) << 16) | (((u64)(p_src)[3]) << 24) | \
+ (((u64)(p_src)[4]) << 32) | (((u64)(p_src)[5]) << 40) | \
+ (((u64)(p_src)[6]) << 48) | (((u64)(p_src)[7]) << 56))
+
+#define SET16(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u16)(src)) >> 8); \
+ } while (0)
+#define SET32(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u32)(src)) >> 8); \
+ (p_dst)[2] = (u8)(((u32)(src)) >> 16); \
+ (p_dst)[3] = (u8)(((u32)(src)) >> 24); \
+ } while (0)
+#define SET64(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u64)(src)) >> 8); \
+ (p_dst)[2] = (u8)(((u64)(src)) >> 16); \
+ (p_dst)[3] = (u8)(((u64)(src)) >> 24); \
+ (p_dst)[4] = (u8)(((u64)(src)) >> 32); \
+ (p_dst)[5] = (u8)(((u64)(src)) >> 40); \
+ (p_dst)[6] = (u8)(((u64)(src)) >> 48); \
+ (p_dst)[7] = (u8)(((u64)(src)) >> 56); \
+ } while (0)
+
+#ifdef __LITTLE_ENDIAN
+#define GET16_A(p_src) (*((u16 *)(p_src)))
+#define GET32_A(p_src) (*((u32 *)(p_src)))
+#define GET64_A(p_src) (*((u64 *)(p_src)))
+#define SET16_A(p_dst, src) (*((u16 *)(p_dst)) = (u16)(src))
+#define SET32_A(p_dst, src) (*((u32 *)(p_dst)) = (u32)(src))
+#define SET64_A(p_dst, src) (*((u64 *)(p_dst)) = (u64)(src))
+#else /* BIG_ENDIAN */
+#define GET16_A(p_src) GET16(p_src)
+#define GET32_A(p_src) GET32(p_src)
+#define GET64_A(p_src) GET64(p_src)
+#define SET16_A(p_dst, src) SET16(p_dst, src)
+#define SET32_A(p_dst, src) SET32(p_dst, src)
+#define SET64_A(p_dst, src) SET64(p_dst, src)
+#endif
+
+/* cache size (in number of sectors) */
+/* (should be a power of 2) */
+#define FAT_CACHE_SIZE 128
+#define FAT_CACHE_HASH_SIZE 64
+#define BUF_CACHE_SIZE 256
+#define BUF_CACHE_HASH_SIZE 64
+
+/* Upcase table macro */
+#define HIGH_INDEX_BIT (8)
+#define HIGH_INDEX_MASK (0xFF00)
+#define LOW_INDEX_BIT (16-HIGH_INDEX_BIT)
+#define UTBL_ROW_COUNT BIT(LOW_INDEX_BIT)
+#define UTBL_COL_COUNT BIT(HIGH_INDEX_BIT)
+
+static inline u16 get_col_index(u16 i)
+{
+ return i >> LOW_INDEX_BIT;
+}
+
+static inline u16 get_row_index(u16 i)
+{
+ return i & ~HIGH_INDEX_MASK;
+}
+
+#define EXFAT_SUPER_MAGIC (0x2011BAB0L)
+#define EXFAT_ROOT_INO 1
+
+/* FAT types */
+#define FAT12 0x01 /* FAT12 */
+#define FAT16 0x0E /* Win95 FAT16 (LBA) */
+#define FAT32 0x0C /* Win95 FAT32 (LBA) */
+#define EXFAT 0x07 /* exFAT */
+
+/* file name lengths */
+#define MAX_CHARSET_SIZE 3 /* max size of multi-byte character */
+#define MAX_PATH_DEPTH 15 /* max depth of path name */
+#define MAX_NAME_LENGTH 256 /* max len of filename including NULL */
+#define MAX_PATH_LENGTH 260 /* max len of pathname including NULL */
+#define DOS_NAME_LENGTH 11 /* DOS filename length excluding NULL */
+#define DOS_PATH_LENGTH 80 /* DOS pathname length excluding NULL */
+
+/* file attributes */
+#define ATTR_NORMAL 0x0000
+#define ATTR_READONLY 0x0001
+#define ATTR_HIDDEN 0x0002
+#define ATTR_SYSTEM 0x0004
+#define ATTR_VOLUME 0x0008
+#define ATTR_SUBDIR 0x0010
+#define ATTR_ARCHIVE 0x0020
+#define ATTR_SYMLINK 0x0040
+#define ATTR_EXTEND 0x000F
+#define ATTR_RWMASK 0x007E
+
+/* file creation modes */
+#define FM_REGULAR 0x00
+#define FM_SYMLINK 0x40
+
+/* return values */
+#define FFS_SUCCESS 0
+#define FFS_MEDIAERR 1
+#define FFS_FORMATERR 2
+#define FFS_MOUNTED 3
+#define FFS_NOTMOUNTED 4
+#define FFS_ALIGNMENTERR 5
+#define FFS_SEMAPHOREERR 6
+#define FFS_INVALIDPATH 7
+#define FFS_INVALIDFID 8
+#define FFS_NOTFOUND 9
+#define FFS_FILEEXIST 10
+#define FFS_PERMISSIONERR 11
+#define FFS_NOTOPENED 12
+#define FFS_MAXOPENED 13
+#define FFS_FULL 14
+#define FFS_EOF 15
+#define FFS_DIRBUSY 16
+#define FFS_MEMORYERR 17
+#define FFS_NAMETOOLONG 18
+#define FFS_ERROR 19
+
+#define NUM_UPCASE 2918
+
+#define DOS_CUR_DIR_NAME ". "
+#define DOS_PAR_DIR_NAME ".. "
+
+#ifdef __LITTLE_ENDIAN
+#define UNI_CUR_DIR_NAME ".\0"
+#define UNI_PAR_DIR_NAME ".\0.\0"
+#else
+#define UNI_CUR_DIR_NAME "\0."
+#define UNI_PAR_DIR_NAME "\0.\0."
+#endif
+
+struct date_time_t {
+ u16 Year;
+ u16 Month;
+ u16 Day;
+ u16 Hour;
+ u16 Minute;
+ u16 Second;
+ u16 MilliSecond;
+};
+
+struct part_info_t {
+ u32 Offset; /* start sector number of the partition */
+ u32 Size; /* in sectors */
+};
+
+struct dev_info_t {
+ u32 SecSize; /* sector size in bytes */
+ u32 DevSize; /* block device size in sectors */
+};
+
+struct vol_info_t {
+ u32 FatType;
+ u32 ClusterSize;
+ u32 NumClusters;
+ u32 FreeClusters;
+ u32 UsedClusters;
+};
+
+/* directory structure */
+struct chain_t {
+ u32 dir;
+ s32 size;
+ u8 flags;
+};
+
+struct file_id_t {
+ struct chain_t dir;
+ s32 entry;
+ u32 type;
+ u32 attr;
+ u32 start_clu;
+ u64 size;
+ u8 flags;
+ s64 rwoffset;
+ s32 hint_last_off;
+ u32 hint_last_clu;
+};
+
+struct dir_entry_t {
+ char Name[MAX_NAME_LENGTH * MAX_CHARSET_SIZE];
+
+ /* used only for FAT12/16/32, not used for exFAT */
+ char ShortName[DOS_NAME_LENGTH + 2];
+
+ u32 Attr;
+ u64 Size;
+ u32 NumSubdirs;
+ struct date_time_t CreateTimestamp;
+ struct date_time_t ModifyTimestamp;
+ struct date_time_t AccessTimestamp;
+};
+
+struct timestamp_t {
+ u16 sec; /* 0 ~ 59 */
+ u16 min; /* 0 ~ 59 */
+ u16 hour; /* 0 ~ 23 */
+ u16 day; /* 1 ~ 31 */
+ u16 mon; /* 1 ~ 12 */
+ u16 year; /* 0 ~ 127 (since 1980) */
+};
+
+/* MS_DOS FAT partition boot record (512 bytes) */
+struct pbr_sector_t {
+ u8 jmp_boot[3];
+ u8 oem_name[8];
+ u8 bpb[109];
+ u8 boot_code[390];
+ u8 signature[2];
+};
+
+/* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */
+struct bpb16_t {
+ u8 sector_size[2];
+ u8 sectors_per_clu;
+ u8 num_reserved[2];
+ u8 num_fats;
+ u8 num_root_entries[2];
+ u8 num_sectors[2];
+ u8 media_type;
+ u8 num_fat_sectors[2];
+ u8 sectors_in_track[2];
+ u8 num_heads[2];
+ u8 num_hid_sectors[4];
+ u8 num_huge_sectors[4];
+
+ u8 phy_drv_no;
+ u8 reserved;
+ u8 ext_signature;
+ u8 vol_serial[4];
+ u8 vol_label[11];
+ u8 vol_type[8];
+};
+
+/* MS-DOS FAT32 BIOS parameter block (79 bytes) */
+struct bpb32_t {
+ u8 sector_size[2];
+ u8 sectors_per_clu;
+ u8 num_reserved[2];
+ u8 num_fats;
+ u8 num_root_entries[2];
+ u8 num_sectors[2];
+ u8 media_type;
+ u8 num_fat_sectors[2];
+ u8 sectors_in_track[2];
+ u8 num_heads[2];
+ u8 num_hid_sectors[4];
+ u8 num_huge_sectors[4];
+ u8 num_fat32_sectors[4];
+ u8 ext_flags[2];
+ u8 fs_version[2];
+ u8 root_cluster[4];
+ u8 fsinfo_sector[2];
+ u8 backup_sector[2];
+ u8 reserved[12];
+
+ u8 phy_drv_no;
+ u8 ext_reserved;
+ u8 ext_signature;
+ u8 vol_serial[4];
+ u8 vol_label[11];
+ u8 vol_type[8];
+};
+
+/* MS-DOS EXFAT BIOS parameter block (109 bytes) */
+struct bpbex_t {
+ u8 reserved1[53];
+ u8 vol_offset[8];
+ u8 vol_length[8];
+ u8 fat_offset[4];
+ u8 fat_length[4];
+ u8 clu_offset[4];
+ u8 clu_count[4];
+ u8 root_cluster[4];
+ u8 vol_serial[4];
+ u8 fs_version[2];
+ u8 vol_flags[2];
+ u8 sector_size_bits;
+ u8 sectors_per_clu_bits;
+ u8 num_fats;
+ u8 phy_drv_no;
+ u8 perc_in_use;
+ u8 reserved2[7];
+};
+
+/* MS-DOS FAT file system information sector (512 bytes) */
+struct fsi_sector_t {
+ u8 signature1[4];
+ u8 reserved1[480];
+ u8 signature2[4];
+ u8 free_cluster[4];
+ u8 next_cluster[4];
+ u8 reserved2[14];
+ u8 signature3[2];
+};
+
+/* MS-DOS FAT directory entry (32 bytes) */
+struct dentry_t {
+ u8 dummy[32];
+};
+
+struct dos_dentry_t {
+ u8 name[DOS_NAME_LENGTH];
+ u8 attr;
+ u8 lcase;
+ u8 create_time_ms;
+ u8 create_time[2];
+ u8 create_date[2];
+ u8 access_date[2];
+ u8 start_clu_hi[2];
+ u8 modify_time[2];
+ u8 modify_date[2];
+ u8 start_clu_lo[2];
+ u8 size[4];
+};
+
+/* MS-DOS FAT extended directory entry (32 bytes) */
+struct ext_dentry_t {
+ u8 order;
+ u8 unicode_0_4[10];
+ u8 attr;
+ u8 sysid;
+ u8 checksum;
+ u8 unicode_5_10[12];
+ u8 start_clu[2];
+ u8 unicode_11_12[4];
+};
+
+/* MS-DOS EXFAT file directory entry (32 bytes) */
+struct file_dentry_t {
+ u8 type;
+ u8 num_ext;
+ u8 checksum[2];
+ u8 attr[2];
+ u8 reserved1[2];
+ u8 create_time[2];
+ u8 create_date[2];
+ u8 modify_time[2];
+ u8 modify_date[2];
+ u8 access_time[2];
+ u8 access_date[2];
+ u8 create_time_ms;
+ u8 modify_time_ms;
+ u8 access_time_ms;
+ u8 reserved2[9];
+};
+
+/* MS-DOS EXFAT stream extension directory entry (32 bytes) */
+struct strm_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 reserved1;
+ u8 name_len;
+ u8 name_hash[2];
+ u8 reserved2[2];
+ u8 valid_size[8];
+ u8 reserved3[4];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT file name directory entry (32 bytes) */
+struct name_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 unicode_0_14[30];
+};
+
+/* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */
+struct bmap_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 reserved[18];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT up-case table directory entry (32 bytes) */
+struct case_dentry_t {
+ u8 type;
+ u8 reserved1[3];
+ u8 checksum[4];
+ u8 reserved2[12];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT volume label directory entry (32 bytes) */
+struct volm_dentry_t {
+ u8 type;
+ u8 label_len;
+ u8 unicode_0_10[22];
+ u8 reserved[8];
+};
+
+/* unused entry hint information */
+struct uentry_t {
+ u32 dir;
+ s32 entry;
+ struct chain_t clu;
+};
+
+/* DOS name structure */
+struct dos_name_t {
+ u8 name[DOS_NAME_LENGTH];
+ u8 name_case;
+};
+
+/* unicode name structure */
+struct uni_name_t {
+ u16 name[MAX_NAME_LENGTH];
+ u16 name_hash;
+ u8 name_len;
+};
+
+struct buf_cache_t {
+ struct buf_cache_t *next;
+ struct buf_cache_t *prev;
+ struct buf_cache_t *hash_next;
+ struct buf_cache_t *hash_prev;
+ s32 drv;
+ sector_t sec;
+ u32 flag;
+ struct buffer_head *buf_bh;
+};
+
+struct fs_func {
+ s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+ void (*free_cluster)(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+ s32 (*count_used_clusters)(struct super_block *sb);
+
+ s32 (*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size);
+ s32 (*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+ s32 (*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+ void (*delete_dir_entry)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ s32 offset, s32 num_entries);
+ void (*get_uni_name_from_ext_entry)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+ s32 (*count_ext_entries)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ struct dentry_t *p_entry);
+ s32 (*calc_num_entries)(struct uni_name_t *p_uniname);
+
+ u32 (*get_entry_type)(struct dentry_t *p_entry);
+ void (*set_entry_type)(struct dentry_t *p_entry, u32 type);
+ u32 (*get_entry_attr)(struct dentry_t *p_entry);
+ void (*set_entry_attr)(struct dentry_t *p_entry, u32 attr);
+ u8 (*get_entry_flag)(struct dentry_t *p_entry);
+ void (*set_entry_flag)(struct dentry_t *p_entry, u8 flag);
+ u32 (*get_entry_clu0)(struct dentry_t *p_entry);
+ void (*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0);
+ u64 (*get_entry_size)(struct dentry_t *p_entry);
+ void (*set_entry_size)(struct dentry_t *p_entry, u64 size);
+ void (*get_entry_time)(struct dentry_t *p_entry,
+ struct timestamp_t *tp, u8 mode);
+ void (*set_entry_time)(struct dentry_t *p_entry,
+ struct timestamp_t *tp, u8 mode);
+};
+
+struct fs_info_t {
+ u32 drv; /* drive ID */
+ u32 vol_type; /* volume FAT type */
+ u32 vol_id; /* volume serial number */
+
+ u64 num_sectors; /* num of sectors in volume */
+ u32 num_clusters; /* num of clusters in volume */
+ u32 cluster_size; /* cluster size in bytes */
+ u32 cluster_size_bits;
+ u32 sectors_per_clu; /* cluster size in sectors */
+ u32 sectors_per_clu_bits;
+
+ u32 PBR_sector; /* PBR sector */
+ u32 FAT1_start_sector; /* FAT1 start sector */
+ u32 FAT2_start_sector; /* FAT2 start sector */
+ u32 root_start_sector; /* root dir start sector */
+ u32 data_start_sector; /* data area start sector */
+ u32 num_FAT_sectors; /* num of FAT sectors */
+
+ u32 root_dir; /* root dir cluster */
+ u32 dentries_in_root; /* num of dentries in root dir */
+ u32 dentries_per_clu; /* num of dentries per cluster */
+
+ u32 vol_flag; /* volume dirty flag */
+ struct buffer_head *pbr_bh; /* PBR sector */
+
+ u32 map_clu; /* allocation bitmap start cluster */
+ u32 map_sectors; /* num of allocation bitmap sectors */
+ struct buffer_head **vol_amap; /* allocation bitmap */
+
+ u16 **vol_utbl; /* upcase table */
+
+ u32 clu_srch_ptr; /* cluster search pointer */
+ u32 used_clusters; /* number of used clusters */
+ struct uentry_t hint_uentry; /* unused entry hint information */
+
+ u32 dev_ejected; /* block device operation error flag */
+
+ struct fs_func *fs_func;
+ struct semaphore v_sem;
+
+ /* FAT cache */
+ struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
+ struct buf_cache_t FAT_cache_lru_list;
+ struct buf_cache_t FAT_cache_hash_list[FAT_CACHE_HASH_SIZE];
+
+ /* buf cache */
+ struct buf_cache_t buf_cache_array[BUF_CACHE_SIZE];
+ struct buf_cache_t buf_cache_lru_list;
+ struct buf_cache_t buf_cache_hash_list[BUF_CACHE_HASH_SIZE];
+};
+
+#define ES_2_ENTRIES 2
+#define ES_3_ENTRIES 3
+#define ES_ALL_ENTRIES 0
+
+struct entry_set_cache_t {
+ /* sector number that contains file_entry */
+ sector_t sector;
+
+ /* byte offset in the sector */
+ s32 offset;
+
+ /*
+ * flag in stream entry.
+ * 01 for cluster chain,
+	 * 03 for contiguous clusters.
+ */
+ s32 alloc_flag;
+
+ u32 num_entries;
+
+ /* __buf should be the last member */
+ void *__buf;
+};
+
+#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */
+#define EXFAT_ERRORS_PANIC 2 /* panic on error */
+#define EXFAT_ERRORS_RO 3 /* remount r/o on error */
+
+/* ioctl command */
+#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
+
+struct exfat_mount_options {
+ kuid_t fs_uid;
+ kgid_t fs_gid;
+ unsigned short fs_fmask;
+ unsigned short fs_dmask;
+
+ /* permission for setting the [am]time */
+ unsigned short allow_utime;
+
+ /* codepage for shortname conversions */
+ unsigned short codepage;
+
+ /* charset for filename input/display */
+ char *iocharset;
+
+ unsigned char casesensitive;
+
+ /* on error: continue, panic, remount-ro */
+ unsigned char errors;
+#ifdef CONFIG_EXFAT_DISCARD
+	/* flag set if -o discard is specified and the device supports discard() */
+ unsigned char discard;
+#endif /* CONFIG_EXFAT_DISCARD */
+};
+
+#define EXFAT_HASH_BITS 8
+#define EXFAT_HASH_SIZE BIT(EXFAT_HASH_BITS)
+
+/*
+ * EXFAT file system in-core superblock data
+ */
+struct bd_info_t {
+ s32 sector_size; /* in bytes */
+ s32 sector_size_bits;
+ s32 sector_size_mask;
+
+ /* total number of sectors in this block device */
+ s32 num_sectors;
+
+ /* opened or not */
+ bool opened;
+};
+
+struct exfat_sb_info {
+ struct fs_info_t fs_info;
+ struct bd_info_t bd_info;
+
+ struct exfat_mount_options options;
+
+ int s_dirt;
+ struct mutex s_lock;
+ struct nls_table *nls_disk; /* Codepage used on disk */
+ struct nls_table *nls_io; /* Charset used for input and display */
+
+ struct inode *fat_inode;
+
+ spinlock_t inode_hash_lock;
+ struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ long debug_flags;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+};
+
+/*
+ * EXFAT file system inode data in memory
+ */
+struct exfat_inode_info {
+ struct file_id_t fid;
+ char *target;
+ /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */
+ loff_t mmu_private; /* physically allocated size */
+ loff_t i_pos; /* on-disk position of directory entry or 0 */
+ struct hlist_node i_hash_fat; /* hash by i_location */
+ struct rw_semaphore truncate_lock;
+ struct inode vfs_inode;
+ struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */
+};
+
+#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info))
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+ return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+/* NLS management function */
+u16 nls_upper(struct super_block *sb, u16 a);
+int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b);
+int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
+void nls_uniname_to_dosname(struct super_block *sb,
+ struct dos_name_t *p_dosname,
+ struct uni_name_t *p_uniname, bool *p_lossy);
+void nls_dosname_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
+ struct uni_name_t *p_uniname);
+void nls_cstring_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname, u8 *p_cstring,
+ bool *p_lossy);
+
+/* buffer cache management */
+void buf_init(struct super_block *sb);
+void buf_shutdown(struct super_block *sb);
+int FAT_read(struct super_block *sb, u32 loc, u32 *content);
+s32 FAT_write(struct super_block *sb, u32 loc, u32 content);
+u8 *FAT_getblk(struct super_block *sb, sector_t sec);
+void FAT_modify(struct super_block *sb, sector_t sec);
+void FAT_release_all(struct super_block *sb);
+void FAT_sync(struct super_block *sb);
+u8 *buf_getblk(struct super_block *sb, sector_t sec);
+void buf_modify(struct super_block *sb, sector_t sec);
+void buf_lock(struct super_block *sb, sector_t sec);
+void buf_unlock(struct super_block *sb, sector_t sec);
+void buf_release(struct super_block *sb, sector_t sec);
+void buf_release_all(struct super_block *sb);
+void buf_sync(struct super_block *sb);
+
+/* fs management functions */
+void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
+void fs_error(struct super_block *sb);
+
+/* cluster management functions */
+s32 clear_cluster(struct super_block *sb, u32 clu);
+s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain);
+s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
+s32 fat_count_used_clusters(struct super_block *sb);
+s32 exfat_count_used_clusters(struct super_block *sb);
+void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
+
+/* allocation bitmap management functions */
+s32 load_alloc_bitmap(struct super_block *sb);
+void free_alloc_bitmap(struct super_block *sb);
+s32 set_alloc_bitmap(struct super_block *sb, u32 clu);
+s32 clr_alloc_bitmap(struct super_block *sb, u32 clu);
+u32 test_alloc_bitmap(struct super_block *sb, u32 clu);
+void sync_alloc_bitmap(struct super_block *sb);
+
+/* upcase table management functions */
+s32 load_upcase_table(struct super_block *sb);
+void free_upcase_table(struct super_block *sb);
+
+/* dir entry management functions */
+u32 fat_get_entry_type(struct dentry_t *p_entry);
+u32 exfat_get_entry_type(struct dentry_t *p_entry);
+void fat_set_entry_type(struct dentry_t *p_entry, u32 type);
+void exfat_set_entry_type(struct dentry_t *p_entry, u32 type);
+u32 fat_get_entry_attr(struct dentry_t *p_entry);
+u32 exfat_get_entry_attr(struct dentry_t *p_entry);
+void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+u8 fat_get_entry_flag(struct dentry_t *p_entry);
+u8 exfat_get_entry_flag(struct dentry_t *p_entry);
+void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
+u32 fat_get_entry_clu0(struct dentry_t *p_entry);
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
+void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+u64 fat_get_entry_size(struct dentry_t *p_entry);
+u64 exfat_get_entry_size(struct dentry_t *p_entry);
+void fat_set_entry_size(struct dentry_t *p_entry, u64 size);
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
+struct timestamp_t *tm_current(struct timestamp_t *tm);
+void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ u32 type, u32 start_clu, u64 size);
+s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size);
+s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu);
+void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
+ u16 *uniname);
+void init_file_entry(struct file_dentry_t *ep, u32 type);
+void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu,
+ u64 size);
+void init_name_entry(struct name_dentry_t *ep, u16 *uniname);
+void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+
+s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ sector_t *sector, s32 *offset);
+struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
+ s32 offset);
+struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, sector_t *sector);
+struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u32 type,
+ struct dentry_t **file_ep);
+void release_entry_set(struct entry_set_cache_t *es);
+s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es);
+s32 write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ struct dentry_t *ep, u32 count);
+s32 search_deleted_or_unused_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 num_entries);
+s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir,
+ s32 num_entries);
+s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
+ u32 type);
+void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry);
+void update_dir_checksum_with_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es);
+bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
+
+/* name conversion functions */
+s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 *entries,
+ struct dos_name_t *p_dosname);
+void get_uni_name_from_dos_entry(struct super_block *sb,
+ struct dos_dentry_t *ep,
+ struct uni_name_t *p_uniname, u8 mode);
+void fat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep,
+ u16 *uniname, s32 order);
+s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep,
+ u16 *uniname, s32 order);
+s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct dos_name_t *p_dosname);
+void fat_attach_count_to_dos_name(u8 *dosname, s32 count);
+s32 fat_calc_num_entries(struct uni_name_t *p_uniname);
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
+u8 calc_checksum_1byte(void *data, s32 len, u8 chksum);
+u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
+u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type);
+
+/* name resolution functions */
+s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname);
+s32 resolve_name(u8 *name, u8 **arg);
+
+/* file operation functions */
+s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 create_dir(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 create_file(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid);
+void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry);
+s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
+ struct chain_t *p_newdir, struct uni_name_t *p_uniname,
+ struct file_id_t *fid);
+
+/* sector read/write functions */
+int sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, bool read);
+int sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, bool sync);
+int multi_sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, s32 num_secs, bool read);
+int multi_sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, s32 num_secs, bool sync);
+
+void bdev_open(struct super_block *sb);
+void bdev_close(struct super_block *sb);
+int bdev_read(struct super_block *sb, sector_t secno,
+ struct buffer_head **bh, u32 num_secs, bool read);
+int bdev_write(struct super_block *sb, sector_t secno,
+ struct buffer_head *bh, u32 num_secs, bool sync);
+int bdev_sync(struct super_block *sb);
+
+extern const u8 uni_upcase[];
+#endif /* _EXFAT_H */
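
The upcase table declared above (u16 **vol_utbl) is a two-level array: get_col_index() selects a column by the high byte of a UTF-16 code unit and get_row_index() an entry within it by the low byte, giving UTBL_COL_COUNT x UTBL_ROW_COUNT (256 x 256) slots. A hedged sketch of such a lookup, assuming unloaded columns stay NULL and fall back to the input character:

#include <stdint.h>
#include <stdio.h>

#define HIGH_INDEX_BIT	8
#define HIGH_INDEX_MASK	0xFF00
#define LOW_INDEX_BIT	(16 - HIGH_INDEX_BIT)

/* the same arithmetic as get_col_index()/get_row_index() above */
static uint16_t col_index(uint16_t i)	{ return i >> LOW_INDEX_BIT; }
static uint16_t row_index(uint16_t i)	{ return i & ~HIGH_INDEX_MASK; }

/* illustrative lookup only; the driver's own NLS code may differ in detail */
static uint16_t to_upper(uint16_t **utbl, uint16_t c)
{
	uint16_t *col = utbl ? utbl[col_index(c)] : NULL;

	return col ? col[row_index(c)] : c;	/* fall back to the input */
}

int main(void)
{
	uint16_t *utbl[1 << HIGH_INDEX_BIT] = { NULL };	/* no columns loaded */

	printf("U+0061 maps to U+%04X\n", to_upper(utbl, 0x0061));
	return 0;
}
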
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
new file mode 100644
index 000000000000..f086c75e7076
--- /dev/null
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include "exfat.h"
+
+void bdev_open(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bd->opened)
+ return;
+
+ p_bd->sector_size = bdev_logical_block_size(sb->s_bdev);
+ p_bd->sector_size_bits = ilog2(p_bd->sector_size);
+ p_bd->sector_size_mask = p_bd->sector_size - 1;
+ p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >>
+ p_bd->sector_size_bits;
+ p_bd->opened = true;
+}
+
+void bdev_close(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ p_bd->opened = false;
+}
+
+int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
+ u32 num_secs, bool read)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ if (*bh)
+ __brelse(*bh);
+
+ if (read)
+ *bh = __bread(sb->s_bdev, secno,
+ num_secs << p_bd->sector_size_bits);
+ else
+ *bh = __getblk(sb->s_bdev, secno,
+ num_secs << p_bd->sector_size_bits);
+
+ if (*bh)
+ return 0;
+
+ WARN(!p_fs->dev_ejected,
+ "[EXFAT] No bh, device seems wrong or to be ejected.\n");
+
+ return FFS_MEDIAERR;
+}
+
+int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
+ u32 num_secs, bool sync)
+{
+ s32 count;
+ struct buffer_head *bh2;
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ if (secno == bh->b_blocknr) {
+ lock_buffer(bh);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ if (sync && (sync_dirty_buffer(bh) != 0))
+ return FFS_MEDIAERR;
+ } else {
+ count = num_secs << p_bd->sector_size_bits;
+
+ bh2 = __getblk(sb->s_bdev, secno, count);
+ if (!bh2)
+ goto no_bh;
+
+ lock_buffer(bh2);
+ memcpy(bh2->b_data, bh->b_data, count);
+ set_buffer_uptodate(bh2);
+ mark_buffer_dirty(bh2);
+ unlock_buffer(bh2);
+ if (sync && (sync_dirty_buffer(bh2) != 0)) {
+ __brelse(bh2);
+ goto no_bh;
+ }
+ __brelse(bh2);
+ }
+
+ return 0;
+
+no_bh:
+ WARN(!p_fs->dev_ejected,
+ "[EXFAT] No bh, device seems wrong or to be ejected.\n");
+
+ return FFS_MEDIAERR;
+}
+
+int bdev_sync(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ return sync_blockdev(sb->s_bdev);
+}
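
bdev_read() above hands back a buffer_head (read from disk when 'read' is true, otherwise just obtained via __getblk()) and leaves the reference with the caller. A hypothetical caller, kernel-style and purely illustrative rather than an excerpt from the driver:

static int example_read_sector(struct super_block *sb, sector_t sec)
{
	struct buffer_head *bh = NULL;
	int err;

	err = bdev_read(sb, sec, &bh, 1, true);	/* one sector, really read it */
	if (err)
		return err;			/* FFS_MEDIAERR on failure */

	/* ... inspect bh->b_data here ... */

	__brelse(bh);				/* drop the reference bdev_read() gave us */
	return 0;
}
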
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
new file mode 100644
index 000000000000..1565ce65d39f
--- /dev/null
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include "exfat.h"
+
+#define LOCKBIT 0x01
+#define DIRTYBIT 0x02
+
+/* Local variables */
+static DEFINE_SEMAPHORE(f_sem);
+static DEFINE_SEMAPHORE(b_sem);
+
+static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
+{
+ s32 off;
+ struct buf_cache_t *bp, *hp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ off = (sec +
+ (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->FAT_cache_hash_list[off];
+ for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
+ if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
+ WARN(!bp->buf_bh,
+			     "[EXFAT] FAT_cache has no bh. It will make the system panic.\n");
+
+ touch_buffer(bp->buf_bh);
+ return bp;
+ }
+ }
+ return NULL;
+}
+
+static void push_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->next = list->next;
+ bp->prev = list;
+ list->next->prev = bp;
+ list->next = bp;
+}
+
+static void push_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev = list->prev;
+ bp->next = list;
+ list->prev->next = bp;
+ list->prev = bp;
+}
+
+static void move_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev->next = bp->next;
+ bp->next->prev = bp->prev;
+ push_to_mru(bp, list);
+}
+
+static void move_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev->next = bp->next;
+ bp->next->prev = bp->prev;
+ push_to_lru(bp, list);
+}
+
+static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = p_fs->FAT_cache_lru_list.prev;
+
+ move_to_mru(bp, &p_fs->FAT_cache_lru_list);
+ return bp;
+}
+
+static void FAT_cache_insert_hash(struct super_block *sb,
+ struct buf_cache_t *bp)
+{
+ s32 off;
+ struct buf_cache_t *hp;
+ struct fs_info_t *p_fs;
+
+ p_fs = &(EXFAT_SB(sb)->fs_info);
+ off = (bp->sec +
+ (bp->sec >> p_fs->sectors_per_clu_bits)) &
+ (FAT_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->FAT_cache_hash_list[off];
+ bp->hash_next = hp->hash_next;
+ bp->hash_prev = hp;
+ hp->hash_next->hash_prev = bp;
+ hp->hash_next = bp;
+}
+
+static void FAT_cache_remove_hash(struct buf_cache_t *bp)
+{
+ (bp->hash_prev)->hash_next = bp->hash_next;
+ (bp->hash_next)->hash_prev = bp->hash_prev;
+}
+
+static void buf_cache_insert_hash(struct super_block *sb,
+ struct buf_cache_t *bp)
+{
+ s32 off;
+ struct buf_cache_t *hp;
+ struct fs_info_t *p_fs;
+
+ p_fs = &(EXFAT_SB(sb)->fs_info);
+ off = (bp->sec +
+ (bp->sec >> p_fs->sectors_per_clu_bits)) &
+ (BUF_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->buf_cache_hash_list[off];
+ bp->hash_next = hp->hash_next;
+ bp->hash_prev = hp;
+ hp->hash_next->hash_prev = bp;
+ hp->hash_next = bp;
+}
+
+static void buf_cache_remove_hash(struct buf_cache_t *bp)
+{
+ (bp->hash_prev)->hash_next = bp->hash_next;
+ (bp->hash_next)->hash_prev = bp->hash_prev;
+}
+
+void buf_init(struct super_block *sb)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ int i;
+
+ /* LRU list */
+ p_fs->FAT_cache_lru_list.next = &p_fs->FAT_cache_lru_list;
+ p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list;
+
+ for (i = 0; i < FAT_CACHE_SIZE; i++) {
+ p_fs->FAT_cache_array[i].drv = -1;
+ p_fs->FAT_cache_array[i].sec = ~0;
+ p_fs->FAT_cache_array[i].flag = 0;
+ p_fs->FAT_cache_array[i].buf_bh = NULL;
+ p_fs->FAT_cache_array[i].prev = NULL;
+ p_fs->FAT_cache_array[i].next = NULL;
+ push_to_mru(&p_fs->FAT_cache_array[i],
+ &p_fs->FAT_cache_lru_list);
+ }
+
+ p_fs->buf_cache_lru_list.next = &p_fs->buf_cache_lru_list;
+ p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list;
+
+ for (i = 0; i < BUF_CACHE_SIZE; i++) {
+ p_fs->buf_cache_array[i].drv = -1;
+ p_fs->buf_cache_array[i].sec = ~0;
+ p_fs->buf_cache_array[i].flag = 0;
+ p_fs->buf_cache_array[i].buf_bh = NULL;
+ p_fs->buf_cache_array[i].prev = NULL;
+ p_fs->buf_cache_array[i].next = NULL;
+ push_to_mru(&p_fs->buf_cache_array[i],
+ &p_fs->buf_cache_lru_list);
+ }
+
+ /* HASH list */
+ for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
+ p_fs->FAT_cache_hash_list[i].drv = -1;
+ p_fs->FAT_cache_hash_list[i].sec = ~0;
+ p_fs->FAT_cache_hash_list[i].hash_next =
+ &p_fs->FAT_cache_hash_list[i];
+ p_fs->FAT_cache_hash_list[i].hash_prev =
+ &p_fs->FAT_cache_hash_list[i];
+ }
+
+ for (i = 0; i < FAT_CACHE_SIZE; i++)
+ FAT_cache_insert_hash(sb, &p_fs->FAT_cache_array[i]);
+
+ for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
+ p_fs->buf_cache_hash_list[i].drv = -1;
+ p_fs->buf_cache_hash_list[i].sec = ~0;
+ p_fs->buf_cache_hash_list[i].hash_next =
+ &p_fs->buf_cache_hash_list[i];
+ p_fs->buf_cache_hash_list[i].hash_prev =
+ &p_fs->buf_cache_hash_list[i];
+ }
+
+ for (i = 0; i < BUF_CACHE_SIZE; i++)
+ buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
+}
+
+void buf_shutdown(struct super_block *sb)
+{
+}
+
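+/*
+ * Decode one FAT entry for the given cluster number.  FAT12 entries are
+ * 12 bits wide and may straddle a sector boundary; FAT16/FAT32/exFAT
+ * entries are 16/32/32 bits.  End-of-chain values are normalized to
+ * CLUSTER_32(~0).
+ */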
+static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
+{
+ s32 off;
+ u32 _content;
+ sector_t sec;
+ u8 *fat_sector, *fat_entry;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_fs->vol_type == FAT12) {
+ sec = p_fs->FAT1_start_sector +
+ ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
+ off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ _content = (u32)fat_sector[off];
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+
+ _content |= (u32)fat_sector[0] << 8;
+ } else {
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+ _content = GET16(fat_entry);
+ }
+
+ if (loc & 1)
+ _content >>= 4;
+
+ _content &= 0x00000FFF;
+
+ if (_content >= CLUSTER_16(0x0FF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == FAT16) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 1));
+ off = (loc << 1) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ _content = GET16_A(fat_entry);
+
+ _content &= 0x0000FFFF;
+
+ if (_content >= CLUSTER_16(0xFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == FAT32) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ _content = GET32_A(fat_entry);
+
+ _content &= 0x0FFFFFFF;
+
+ if (_content >= CLUSTER_32(0x0FFFFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == EXFAT) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+ _content = GET32_A(fat_entry);
+
+ if (_content >= CLUSTER_32(0xFFFFFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ }
+
+ /* Unknown volume type, throw in the towel and go home */
+ *content = CLUSTER_32(~0);
+ return 0;
+}
+
+/* in : sb, loc
+ * out: content
+ * returns 0 on success
+ * -1 on error
+ */
+int FAT_read(struct super_block *sb, u32 loc, u32 *content)
+{
+ s32 ret;
+
+ down(&f_sem);
+ ret = __FAT_read(sb, loc, content);
+ up(&f_sem);
+
+ return ret;
+}
+
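+/*
+ * Encode one FAT entry.  For FAT12 the 12-bit value has to be merged with
+ * the adjacent entry's shared nibble (possibly across a sector boundary);
+ * FAT32 preserves the reserved top four bits.  Modified sectors are pushed
+ * out through FAT_modify().
+ */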
+static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
+{
+ s32 off;
+ sector_t sec;
+ u8 *fat_sector, *fat_entry;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_fs->vol_type == FAT12) {
+ content &= 0x00000FFF;
+
+ sec = p_fs->FAT1_start_sector +
+ ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
+ off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ if (loc & 1) { /* odd */
+ content <<= 4;
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector[off] = (u8)(content |
+ (fat_sector[off] &
+ 0x0F));
+ FAT_modify(sb, sec);
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_sector[0] = (u8)(content >> 8);
+ } else {
+ fat_entry = &fat_sector[off];
+ content |= GET16(fat_entry) & 0x000F;
+
+ SET16(fat_entry, content);
+ }
+ } else { /* even */
+ fat_sector[off] = (u8)(content);
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector[off] = (u8)(content);
+ FAT_modify(sb, sec);
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+ fat_sector[0] = (u8)((fat_sector[0] & 0xF0) |
+ (content >> 8));
+ } else {
+ fat_entry = &fat_sector[off];
+ content |= GET16(fat_entry) & 0xF000;
+
+ SET16(fat_entry, content);
+ }
+ }
+ }
+
+ else if (p_fs->vol_type == FAT16) {
+ content &= 0x0000FFFF;
+
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 1));
+ off = (loc << 1) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ SET16_A(fat_entry, content);
+ } else if (p_fs->vol_type == FAT32) {
+ content &= 0x0FFFFFFF;
+
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ content |= GET32_A(fat_entry) & 0xF0000000;
+
+ SET32_A(fat_entry, content);
+ } else { /* p_fs->vol_type == EXFAT */
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ SET32_A(fat_entry, content);
+ }
+
+ FAT_modify(sb, sec);
+ return 0;
+}
+
+int FAT_write(struct super_block *sb, u32 loc, u32 content)
+{
+ s32 ret;
+
+ down(&f_sem);
+ ret = __FAT_write(sb, loc, content);
+ up(&f_sem);
+
+ return ret;
+}
+
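+/*
+ * Return a pointer to the cached FAT sector data, reading the sector from
+ * the device (and recycling the least-recently-used cache entry) on a miss.
+ */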
+u8 *FAT_getblk(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = FAT_cache_find(sb, sec);
+ if (bp) {
+ move_to_mru(bp, &p_fs->FAT_cache_lru_list);
+ return bp->buf_bh->b_data;
+ }
+
+ bp = FAT_cache_get(sb, sec);
+
+ FAT_cache_remove_hash(bp);
+
+ bp->drv = p_fs->drv;
+ bp->sec = sec;
+ bp->flag = 0;
+
+ FAT_cache_insert_hash(sb, bp);
+
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ FAT_cache_remove_hash(bp);
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+ bp->buf_bh = NULL;
+
+ move_to_lru(bp, &p_fs->FAT_cache_lru_list);
+ return NULL;
+ }
+
+ return bp->buf_bh->b_data;
+}
+
+void FAT_modify(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ bp = FAT_cache_find(sb, sec);
+ if (bp)
+ sector_write(sb, sec, bp->buf_bh, 0);
+}
+
+void FAT_release_all(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&f_sem);
+
+ bp = p_fs->FAT_cache_lru_list.next;
+ while (bp != &p_fs->FAT_cache_lru_list) {
+ if (bp->drv == p_fs->drv) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+ }
+ bp = bp->next;
+ }
+
+ up(&f_sem);
+}
+
+void FAT_sync(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&f_sem);
+
+ bp = p_fs->FAT_cache_lru_list.next;
+ while (bp != &p_fs->FAT_cache_lru_list) {
+ if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
+ sync_dirty_buffer(bp->buf_bh);
+ bp->flag &= ~(DIRTYBIT);
+ }
+ bp = bp->next;
+ }
+
+ up(&f_sem);
+}
+
+static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
+{
+ s32 off;
+ struct buf_cache_t *bp, *hp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ off = (sec + (sec >> p_fs->sectors_per_clu_bits)) &
+ (BUF_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->buf_cache_hash_list[off];
+ for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
+ if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
+ touch_buffer(bp->buf_bh);
+ return bp;
+ }
+ }
+ return NULL;
+}
+
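+/*
+ * Pick a victim from the LRU tail, skipping entries that are pinned with
+ * LOCKBIT (via buf_lock()).
+ */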
+static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = p_fs->buf_cache_lru_list.prev;
+ while (bp->flag & LOCKBIT)
+ bp = bp->prev;
+
+ move_to_mru(bp, &p_fs->buf_cache_lru_list);
+ return bp;
+}
+
+static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = buf_cache_find(sb, sec);
+ if (bp) {
+ move_to_mru(bp, &p_fs->buf_cache_lru_list);
+ return bp->buf_bh->b_data;
+ }
+
+ bp = buf_cache_get(sb, sec);
+
+ buf_cache_remove_hash(bp);
+
+ bp->drv = p_fs->drv;
+ bp->sec = sec;
+ bp->flag = 0;
+
+ buf_cache_insert_hash(sb, bp);
+
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ buf_cache_remove_hash(bp);
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+ bp->buf_bh = NULL;
+
+ move_to_lru(bp, &p_fs->buf_cache_lru_list);
+ return NULL;
+ }
+
+ return bp->buf_bh->b_data;
+}
+
+u8 *buf_getblk(struct super_block *sb, sector_t sec)
+{
+ u8 *buf;
+
+ down(&b_sem);
+ buf = __buf_getblk(sb, sec);
+ up(&b_sem);
+
+ return buf;
+}
+
+void buf_modify(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ sector_write(sb, sec, bp->buf_bh, 0);
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_lock(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ bp->flag |= LOCKBIT;
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_unlock(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ bp->flag &= ~(LOCKBIT);
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_release(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp)) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+
+ move_to_lru(bp, &p_fs->buf_cache_lru_list);
+ }
+
+ up(&b_sem);
+}
+
+void buf_release_all(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = p_fs->buf_cache_lru_list.next;
+ while (bp != &p_fs->buf_cache_lru_list) {
+ if (bp->drv == p_fs->drv) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+ }
+ bp = bp->next;
+ }
+
+ up(&b_sem);
+}
+
+void buf_sync(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = p_fs->buf_cache_lru_list.next;
+ while (bp != &p_fs->buf_cache_lru_list) {
+ if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
+ sync_dirty_buffer(bp->buf_bh);
+ bp->flag &= ~(DIRTYBIT);
+ }
+ bp = bp->next;
+ }
+
+ up(&b_sem);
+}
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
new file mode 100644
index 000000000000..da8c58149c35
--- /dev/null
+++ b/drivers/staging/exfat/exfat_core.c
@@ -0,0 +1,3703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include "exfat.h"
+
+static void __set_sb_dirty(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ sbi->s_dirt = 1;
+}
+
+static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
+
+static char *reserved_names[] = {
+ "AUX ", "CON ", "NUL ", "PRN ",
+ "COM1 ", "COM2 ", "COM3 ", "COM4 ",
+ "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
+ "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
+ "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
+ NULL
+};
+
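+/*
+ * Lookup tables used by the allocation bitmap code: free_bit[i] is the index
+ * of the lowest clear bit in the byte value i (i = 255 has none, so the
+ * table stops at 254), and used_bit[i] is the number of set bits in i.
+ */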
+static u8 free_bit[] = {
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */
+ 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */
+ 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */
+ 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */
+ 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */
+};
+
+static u8 used_bit[] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */
+ 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */
+ 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, /* 80 ~ 99 */
+ 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */
+ 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */
+ 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */
+ 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */
+ 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, /* 220 ~ 239 */
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */
+};
+
+#define BITMAP_LOC(v) ((v) >> 3)
+#define BITMAP_SHIFT(v) ((v) & 0x07)
+
+static inline s32 exfat_bitmap_test(u8 *bitmap, int i)
+{
+ u8 data;
+
+ data = bitmap[BITMAP_LOC(i)];
+ if ((data >> BITMAP_SHIFT(i)) & 0x01)
+ return 1;
+ return 0;
+}
+
+static inline void exfat_bitmap_set(u8 *bitmap, int i)
+{
+ bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i));
+}
+
+static inline void exfat_bitmap_clear(u8 *bitmap, int i)
+{
+ bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i));
+}
+
+/*
+ * File System Management Functions
+ */
+
+void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
+{
+ struct pbr_sector_t *p_pbr;
+ struct bpbex_t *p_bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_fs->vol_flag == new_flag)
+ return;
+
+ p_fs->vol_flag = new_flag;
+
+ if (p_fs->vol_type == EXFAT) {
+ if (p_fs->pbr_bh == NULL) {
+ if (sector_read(sb, p_fs->PBR_sector,
+ &p_fs->pbr_bh, 1) != FFS_SUCCESS)
+ return;
+ }
+
+ p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
+ p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ SET16(p_bpb->vol_flags, (u16)new_flag);
+
+ /* XXX duyoung
+ * what can we do here? (because fs_set_vol_flags() returns void,
+ * a write error cannot be reported back to the caller)
+ */
+ if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
+ else
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
+ }
+}
+
+void fs_error(struct super_block *sb)
+{
+ struct exfat_mount_options *opts = &EXFAT_SB(sb)->options;
+
+ if (opts->errors == EXFAT_ERRORS_PANIC) {
+ panic("[EXFAT] Filesystem panic from previous error\n");
+ } else if ((opts->errors == EXFAT_ERRORS_RO) && !sb_rdonly(sb)) {
+ sb->s_flags |= SB_RDONLY;
+ pr_err("[EXFAT] Filesystem has been set read-only\n");
+ }
+}
+
+/*
+ * Cluster Management Functions
+ */
+
+s32 clear_cluster(struct super_block *sb, u32 clu)
+{
+ sector_t s, n;
+ s32 ret = FFS_SUCCESS;
+ struct buffer_head *tmp_bh = NULL;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */
+ s = p_fs->root_start_sector;
+ n = p_fs->data_start_sector;
+ } else {
+ s = START_SECTOR(clu);
+ n = s + p_fs->sectors_per_clu;
+ }
+
+ for (; s < n; s++) {
+ ret = sector_read(sb, s, &tmp_bh, 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
+ ret = sector_write(sb, s, tmp_bh, 0);
+ if (ret != FFS_SUCCESS)
+ break;
+ }
+
+ brelse(tmp_bh);
+ return ret;
+}
+
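+/*
+ * Allocate num_alloc clusters on a FAT12/16/32 volume by scanning the FAT
+ * linearly for free entries, starting at the hint in p_chain->dir (or at
+ * the volume-wide search pointer), and linking them into a chain.
+ */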
+s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain)
+{
+ int i, num_clusters = 0;
+ u32 new_clu, last_clu = CLUSTER_32(~0), read_clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ new_clu = p_chain->dir;
+ if (new_clu == CLUSTER_32(~0))
+ new_clu = p_fs->clu_srch_ptr;
+ else if (new_clu >= p_fs->num_clusters)
+ new_clu = 2;
+
+ __set_sb_dirty(sb);
+
+ p_chain->dir = CLUSTER_32(~0);
+
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ if (FAT_read(sb, new_clu, &read_clu) != 0)
+ return -1;
+
+ if (read_clu == CLUSTER_32(0)) {
+ if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -1;
+ num_clusters++;
+
+ if (p_chain->dir == CLUSTER_32(~0)) {
+ p_chain->dir = new_clu;
+ } else {
+ if (FAT_write(sb, last_clu, new_clu) < 0)
+ return -1;
+ }
+
+ last_clu = new_clu;
+
+ if ((--num_alloc) == 0) {
+ p_fs->clu_srch_ptr = new_clu;
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters += num_clusters;
+
+ return num_clusters;
+ }
+ }
+ if ((++new_clu) >= p_fs->num_clusters)
+ new_clu = 2;
+ }
+
+ p_fs->clu_srch_ptr = new_clu;
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters += num_clusters;
+
+ return num_clusters;
+}
+
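+/*
+ * Allocate num_alloc clusters on an exFAT volume using the allocation
+ * bitmap.  While the run stays contiguous the chain keeps flags == 0x03
+ * (no FAT chain needed); as soon as a gap appears the existing run is
+ * converted to a FAT chain (flags == 0x01) via exfat_chain_cont_cluster().
+ */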
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain)
+{
+ s32 num_clusters = 0;
+ u32 hint_clu, new_clu, last_clu = CLUSTER_32(~0);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ hint_clu = p_chain->dir;
+ if (hint_clu == CLUSTER_32(~0)) {
+ hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr-2);
+ if (hint_clu == CLUSTER_32(~0))
+ return 0;
+ } else if (hint_clu >= p_fs->num_clusters) {
+ hint_clu = 2;
+ p_chain->flags = 0x01;
+ }
+
+ __set_sb_dirty(sb);
+
+ p_chain->dir = CLUSTER_32(~0);
+
+ while ((new_clu = test_alloc_bitmap(sb, hint_clu-2)) != CLUSTER_32(~0)) {
+ if (new_clu != hint_clu) {
+ if (p_chain->flags == 0x03) {
+ exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters);
+ p_chain->flags = 0x01;
+ }
+ }
+
+ if (set_alloc_bitmap(sb, new_clu-2) != FFS_SUCCESS)
+ return -1;
+
+ num_clusters++;
+
+ if (p_chain->flags == 0x01) {
+ if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -1;
+ }
+
+ if (p_chain->dir == CLUSTER_32(~0)) {
+ p_chain->dir = new_clu;
+ } else {
+ if (p_chain->flags == 0x01) {
+ if (FAT_write(sb, last_clu, new_clu) < 0)
+ return -1;
+ }
+ }
+ last_clu = new_clu;
+
+ if ((--num_alloc) == 0) {
+ p_fs->clu_srch_ptr = hint_clu;
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return num_clusters;
+ }
+
+ hint_clu = new_clu + 1;
+ if (hint_clu >= p_fs->num_clusters) {
+ hint_clu = 2;
+
+ if (p_chain->flags == 0x03) {
+ exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters);
+ p_chain->flags = 0x01;
+ }
+ }
+ }
+
+ p_fs->clu_srch_ptr = hint_clu;
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return num_clusters;
+}
+
+void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse)
+{
+ s32 num_clusters = 0;
+ u32 clu, prev;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int i;
+ sector_t sector;
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return;
+ __set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ if (p_chain->size <= 0)
+ return;
+
+ do {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector+i);
+ }
+
+ prev = clu;
+ if (FAT_read(sb, clu, &clu) == -1)
+ break;
+
+ if (FAT_write(sb, prev, CLUSTER_32(0)) < 0)
+ break;
+ num_clusters++;
+
+ } while (clu != CLUSTER_32(~0));
+
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters -= num_clusters;
+}
+
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse)
+{
+ s32 num_clusters = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int i;
+ sector_t sector;
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return;
+
+ if (p_chain->size <= 0) {
+ pr_err("[EXFAT] free_cluster : skip free-req clu:%u, because of zero-size truncation\n",
+ p_chain->dir);
+ return;
+ }
+
+ __set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ do {
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector+i);
+ }
+
+ if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS)
+ break;
+ clu++;
+
+ num_clusters++;
+ } while (num_clusters < p_chain->size);
+ } else {
+ do {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector+i);
+ }
+
+ if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS)
+ break;
+
+ if (FAT_read(sb, clu, &clu) == -1)
+ break;
+ num_clusters++;
+ } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
+ }
+
+ if (p_fs->used_clusters != (u32) ~0)
+ p_fs->used_clusters -= num_clusters;
+}
+
+u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
+{
+ u32 clu, next;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ clu += p_chain->size - 1;
+ } else {
+ while ((FAT_read(sb, clu, &next) == 0) &&
+ (next != CLUSTER_32(~0))) {
+ if (p_fs->dev_ejected)
+ break;
+ clu = next;
+ }
+ }
+
+ return clu;
+}
+
+s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
+{
+ int i, count = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return 0;
+
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ count = p_chain->size;
+ } else {
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ count++;
+ if (FAT_read(sb, clu, &clu) != 0)
+ return 0;
+ if (clu == CLUSTER_32(~0))
+ break;
+ }
+ }
+
+ return count;
+}
+
+s32 fat_count_used_clusters(struct super_block *sb)
+{
+ int i, count = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ if (FAT_read(sb, i, &clu) != 0)
+ break;
+ if (clu != CLUSTER_32(0))
+ count++;
+ }
+
+ return count;
+}
+
+s32 exfat_count_used_clusters(struct super_block *sb)
+{
+ int i, map_i, map_b, count = 0;
+ u8 k;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ map_i = map_b = 0;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *) p_fs->vol_amap[map_i]->b_data) + map_b);
+ count += used_bit[k];
+
+ if ((++map_b) >= p_bd->sector_size) {
+ map_i++;
+ map_b = 0;
+ }
+ }
+
+ return count;
+}
+
+void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
+{
+ if (len == 0)
+ return;
+
+ while (len > 1) {
+ if (FAT_write(sb, chain, chain+1) < 0)
+ break;
+ chain++;
+ len--;
+ }
+ FAT_write(sb, chain, CLUSTER_32(~0));
+}
+
+/*
+ * Allocation Bitmap Management Functions
+ */
+
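+/*
+ * Locate the allocation bitmap directory entry (TYPE_BITMAP) in the root
+ * directory and keep all of its sectors cached in p_fs->vol_amap[] until
+ * free_alloc_bitmap() releases them.
+ */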
+s32 load_alloc_bitmap(struct super_block *sb)
+{
+ int i, j, ret;
+ u32 map_size;
+ u32 type;
+ sector_t sector;
+ struct chain_t clu;
+ struct bmap_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu.dir = p_fs->root_dir;
+ clu.flags = 0x01;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < p_fs->dentries_per_clu; i++) {
+ ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_BITMAP)
+ continue;
+
+ if (ep->flags == 0x0) {
+ p_fs->map_clu = GET32_A(ep->start_clu);
+ map_size = (u32) GET64_A(ep->size);
+
+ p_fs->map_sectors = ((map_size-1) >> p_bd->sector_size_bits) + 1;
+
+ p_fs->vol_amap = kmalloc_array(p_fs->map_sectors,
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (p_fs->vol_amap == NULL)
+ return FFS_MEMORYERR;
+
+ sector = START_SECTOR(p_fs->map_clu);
+
+ for (j = 0; j < p_fs->map_sectors; j++) {
+ p_fs->vol_amap[j] = NULL;
+ ret = sector_read(sb, sector+j, &(p_fs->vol_amap[j]), 1);
+ if (ret != FFS_SUCCESS) {
+ /* release all buffers and free vol_amap */
+ i = 0;
+ while (i < j)
+ brelse(p_fs->vol_amap[i++]);
+
+ kfree(p_fs->vol_amap);
+ p_fs->vol_amap = NULL;
+ return ret;
+ }
+ }
+
+ p_fs->pbr_bh = NULL;
+ return FFS_SUCCESS;
+ }
+ }
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+
+ return FFS_FORMATERR;
+}
+
+void free_alloc_bitmap(struct super_block *sb)
+{
+ int i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ brelse(p_fs->pbr_bh);
+
+ for (i = 0; i < p_fs->map_sectors; i++)
+ __brelse(p_fs->vol_amap[i]);
+
+ kfree(p_fs->vol_amap);
+ p_fs->vol_amap = NULL;
+}
+
+s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
+
+ sector = START_SECTOR(p_fs->map_clu) + i;
+
+ exfat_bitmap_set((u8 *) p_fs->vol_amap[i]->b_data, b);
+
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
+
+s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+ s32 ret;
+#ifdef CONFIG_EXFAT_DISCARD
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+ int err;
+#endif /* CONFIG_EXFAT_DISCARD */
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
+
+ sector = START_SECTOR(p_fs->map_clu) + i;
+
+ exfat_bitmap_clear((u8 *) p_fs->vol_amap[i]->b_data, b);
+
+ ret = sector_write(sb, sector, p_fs->vol_amap[i], 0);
+
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard) {
+ err = sb_issue_discard(sb, START_SECTOR(clu),
+ (1 << p_fs->sectors_per_clu_bits),
+ GFP_NOFS, 0);
+ if (err == -EOPNOTSUPP) {
+ pr_warn("discard not supported by device, disabling");
+ opts->discard = 0;
+ }
+ }
+#endif /* CONFIG_EXFAT_DISCARD */
+
+ return ret;
+}
+
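+/*
+ * Find the first free cluster at or after bitmap index 'clu' by scanning the
+ * allocation bitmap a byte at a time with the free_bit[] table, wrapping
+ * around at the end of the bitmap.  Returns the cluster number on success or
+ * CLUSTER_32(~0) when no free cluster exists.
+ */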
+u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu_base = (clu & ~(0x7)) + 2;
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (p_bd->sector_size_bits + 3);
+ map_b = (clu >> 3) & p_bd->sector_size_mask;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *) p_fs->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < p_fs->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ if (((++map_b) >= p_bd->sector_size) ||
+ (clu_base >= p_fs->num_clusters)) {
+ if ((++map_i) >= p_fs->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUSTER_32(~0);
+}
+
+void sync_alloc_bitmap(struct super_block *sb)
+{
+ int i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_fs->vol_amap == NULL)
+ return;
+
+ for (i = 0; i < p_fs->map_sectors; i++)
+ sync_dirty_buffer(p_fs->vol_amap[i]);
+}
+
+/*
+ * Upcase table Management Functions
+ */
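+
+/*
+ * The on-disk up-case table is run-length compressed: a 0xFFFF marker is
+ * followed by the number of code points that simply map to themselves,
+ * while any other value is the upper-case mapping for the current index.
+ * The table is expanded into a two-level (column/row) array so that only
+ * the columns that actually contain mappings are allocated.
+ */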
+static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
+ u32 num_sectors, u32 utbl_checksum)
+{
+ int i, ret = FFS_ERROR;
+ u32 j;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct buffer_head *tmp_bh = NULL;
+ sector_t end_sector = num_sectors + sector;
+
+ bool skip = false;
+ u32 index = 0;
+ u16 uni = 0;
+ u16 **upcase_table;
+
+ u32 checksum = 0;
+
+ upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
+ GFP_KERNEL);
+ if (upcase_table == NULL)
+ return FFS_MEMORYERR;
+ memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+
+ while (sector < end_sector) {
+ ret = sector_read(sb, sector, &tmp_bh, 1);
+ if (ret != FFS_SUCCESS) {
+ pr_debug("sector read (0x%llX)fail\n",
+ (unsigned long long)sector);
+ goto error;
+ }
+ sector++;
+
+ for (i = 0; i < p_bd->sector_size && index <= 0xFFFF; i += 2) {
+ uni = GET16(((u8 *) tmp_bh->b_data)+i);
+
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
+ i);
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
+ (i + 1));
+
+ if (skip) {
+ pr_debug("skip from 0x%X ", index);
+ index += uni;
+ pr_debug("to 0x%X (amount of 0x%X)\n",
+ index, uni);
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (upcase_table[col_index] == NULL) {
+ pr_debug("alloc = 0x%X\n", col_index);
+ upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
+ sizeof(u16), GFP_KERNEL);
+ if (upcase_table[col_index] == NULL) {
+ ret = FFS_MEMORYERR;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+ }
+ if (index >= 0xFFFF && utbl_checksum == checksum) {
+ if (tmp_bh)
+ brelse(tmp_bh);
+ return FFS_SUCCESS;
+ }
+ ret = FFS_ERROR;
+error:
+ if (tmp_bh)
+ brelse(tmp_bh);
+ free_upcase_table(sb);
+ return ret;
+}
+
+static s32 __load_default_upcase_table(struct super_block *sb)
+{
+ int i, ret = FFS_ERROR;
+ u32 j;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bool skip = false;
+ u32 index = 0;
+ u16 uni = 0;
+ u16 **upcase_table;
+
+ upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
+ GFP_KERNEL);
+ if (upcase_table == NULL)
+ return FFS_MEMORYERR;
+ memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+
+ for (i = 0; index <= 0xFFFF && i < NUM_UPCASE*2; i += 2) {
+ uni = GET16(uni_upcase + i);
+ if (skip) {
+ pr_debug("skip from 0x%X ", index);
+ index += uni;
+ pr_debug("to 0x%X (amount of 0x%X)\n", index, uni);
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (upcase_table[col_index] == NULL) {
+ pr_debug("alloc = 0x%X\n", col_index);
+ upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (upcase_table[col_index] == NULL) {
+ ret = FFS_MEMORYERR;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+
+ if (index >= 0xFFFF)
+ return FFS_SUCCESS;
+
+error:
+ /* FATAL error: default upcase table has error */
+ free_upcase_table(sb);
+ return ret;
+}
+
+s32 load_upcase_table(struct super_block *sb)
+{
+ int i;
+ u32 tbl_clu, tbl_size;
+ sector_t sector;
+ u32 type, num_sectors;
+ struct chain_t clu;
+ struct case_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu.dir = p_fs->root_dir;
+ clu.flags = 0x01;
+
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ for (i = 0; i < p_fs->dentries_per_clu; i++) {
+ ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_UPCASE)
+ continue;
+
+ tbl_clu = GET32_A(ep->start_clu);
+ tbl_size = (u32) GET64_A(ep->size);
+
+ sector = START_SECTOR(tbl_clu);
+ num_sectors = ((tbl_size-1) >> p_bd->sector_size_bits) + 1;
+ if (__load_upcase_table(sb, sector, num_sectors,
+ GET32_A(ep->checksum)) != FFS_SUCCESS)
+ break;
+ return FFS_SUCCESS;
+ }
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+ /* load default upcase table */
+ return __load_default_upcase_table(sb);
+}
+
+void free_upcase_table(struct super_block *sb)
+{
+ u32 i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ u16 **upcase_table;
+
+ upcase_table = p_fs->vol_utbl;
+ for (i = 0; i < UTBL_COL_COUNT; i++)
+ kfree(upcase_table[i]);
+
+ kfree(p_fs->vol_utbl);
+ p_fs->vol_utbl = NULL;
+}
+
+/*
+ * Directory Entry Management Functions
+ */
+
+u32 fat_get_entry_type(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry;
+
+ if (*(ep->name) == 0x0)
+ return TYPE_UNUSED;
+
+ else if (*(ep->name) == 0xE5)
+ return TYPE_DELETED;
+
+ else if (ep->attr == ATTR_EXTEND)
+ return TYPE_EXTEND;
+
+ else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_VOLUME)
+ return TYPE_VOLUME;
+
+ else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_SUBDIR)
+ return TYPE_DIR;
+
+ return TYPE_FILE;
+}
+
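+/*
+ * Map the raw exFAT entry type byte to the driver's TYPE_* constants:
+ * 0x81 allocation bitmap, 0x82 up-case table, 0x83 volume label, 0x85
+ * file/directory, 0xC0 stream extension, 0xC1 file name, with the high
+ * (in-use) bit cleared for deleted entries and 0x00 marking the end of
+ * the directory.
+ */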
+u32 exfat_get_entry_type(struct dentry_t *p_entry)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *) p_entry;
+
+ if (ep->type == 0x0) {
+ return TYPE_UNUSED;
+ } else if (ep->type < 0x80) {
+ return TYPE_DELETED;
+ } else if (ep->type == 0x80) {
+ return TYPE_INVALID;
+ } else if (ep->type < 0xA0) {
+ if (ep->type == 0x81) {
+ return TYPE_BITMAP;
+ } else if (ep->type == 0x82) {
+ return TYPE_UPCASE;
+ } else if (ep->type == 0x83) {
+ return TYPE_VOLUME;
+ } else if (ep->type == 0x85) {
+ if (GET16_A(ep->attr) & ATTR_SUBDIR)
+ return TYPE_DIR;
+ else
+ return TYPE_FILE;
+ }
+ return TYPE_CRITICAL_PRI;
+ } else if (ep->type < 0xC0) {
+ if (ep->type == 0xA0)
+ return TYPE_GUID;
+ else if (ep->type == 0xA1)
+ return TYPE_PADDING;
+ else if (ep->type == 0xA2)
+ return TYPE_ACLTAB;
+ return TYPE_BENIGN_PRI;
+ } else if (ep->type < 0xE0) {
+ if (ep->type == 0xC0)
+ return TYPE_STREAM;
+ else if (ep->type == 0xC1)
+ return TYPE_EXTEND;
+ else if (ep->type == 0xC2)
+ return TYPE_ACL;
+ return TYPE_CRITICAL_SEC;
+ }
+
+ return TYPE_BENIGN_SEC;
+}
+
+void fat_set_entry_type(struct dentry_t *p_entry, u32 type)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry;
+
+ if (type == TYPE_UNUSED)
+ *(ep->name) = 0x0;
+
+ else if (type == TYPE_DELETED)
+ *(ep->name) = 0xE5;
+
+ else if (type == TYPE_EXTEND)
+ ep->attr = ATTR_EXTEND;
+
+ else if (type == TYPE_DIR)
+ ep->attr = ATTR_SUBDIR;
+
+ else if (type == TYPE_FILE)
+ ep->attr = ATTR_ARCHIVE;
+
+ else if (type == TYPE_SYMLINK)
+ ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
+}
+
+void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *) p_entry;
+
+ if (type == TYPE_UNUSED) {
+ ep->type = 0x0;
+ } else if (type == TYPE_DELETED) {
+ ep->type &= ~0x80;
+ } else if (type == TYPE_STREAM) {
+ ep->type = 0xC0;
+ } else if (type == TYPE_EXTEND) {
+ ep->type = 0xC1;
+ } else if (type == TYPE_BITMAP) {
+ ep->type = 0x81;
+ } else if (type == TYPE_UPCASE) {
+ ep->type = 0x82;
+ } else if (type == TYPE_VOLUME) {
+ ep->type = 0x83;
+ } else if (type == TYPE_DIR) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_SUBDIR);
+ } else if (type == TYPE_FILE) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_ARCHIVE);
+ } else if (type == TYPE_SYMLINK) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK);
+ }
+}
+
+u32 fat_get_entry_attr(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry;
+
+ return (u32) ep->attr;
+}
+
+u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *) p_entry;
+
+ return (u32) GET16_A(ep->attr);
+}
+
+void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry;
+
+ ep->attr = (u8) attr;
+}
+
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *) p_entry;
+
+ SET16_A(ep->attr, (u16) attr);
+}
+
+u8 fat_get_entry_flag(struct dentry_t *p_entry)
+{
+ return 0x01;
+}
+
+u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry;
+
+ return ep->flags;
+}
+
+void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+{
+}
+
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry;
+
+ ep->flags = flags;
+}
+
+u32 fat_get_entry_clu0(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *) p_entry;
+
+ return ((u32)GET16_A(ep->start_clu_hi) << 16) |
+ GET16_A(ep->start_clu_lo);
+}
+
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *) p_entry;
+
+ return GET32_A(ep->start_clu);
+}
+
+void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
+ SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
+}
+
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ SET32_A(ep->start_clu, start_clu);
+}
+
+u64 fat_get_entry_size(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ return (u64) GET32_A(ep->size);
+}
+
+u64 exfat_get_entry_size(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ return GET64_A(ep->valid_size);
+}
+
+void fat_set_entry_size(struct dentry_t *p_entry, u64 size)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ SET32_A(ep->size, (u32) size);
+}
+
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
+}
+
+void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t = 0x00, d = 0x21;
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ switch (mode) {
+ case TM_CREATE:
+ t = GET16_A(ep->create_time);
+ d = GET16_A(ep->create_date);
+ break;
+ case TM_MODIFY:
+ t = GET16_A(ep->modify_time);
+ d = GET16_A(ep->modify_date);
+ break;
+ }
+
+ tp->sec = (t & 0x001F) << 1;
+ tp->min = (t >> 5) & 0x003F;
+ tp->hour = (t >> 11);
+ tp->day = (d & 0x001F);
+ tp->mon = (d >> 5) & 0x000F;
+ tp->year = (d >> 9);
+}
+
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t = 0x00, d = 0x21;
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ switch (mode) {
+ case TM_CREATE:
+ t = GET16_A(ep->create_time);
+ d = GET16_A(ep->create_date);
+ break;
+ case TM_MODIFY:
+ t = GET16_A(ep->modify_time);
+ d = GET16_A(ep->modify_date);
+ break;
+ case TM_ACCESS:
+ t = GET16_A(ep->access_time);
+ d = GET16_A(ep->access_date);
+ break;
+ }
+
+ tp->sec = (t & 0x001F) << 1;
+ tp->min = (t >> 5) & 0x003F;
+ tp->hour = (t >> 11);
+ tp->day = (d & 0x001F);
+ tp->mon = (d >> 5) & 0x000F;
+ tp->year = (d >> 9);
+}
+
+void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t, d;
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+ d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+ switch (mode) {
+ case TM_CREATE:
+ SET16_A(ep->create_time, t);
+ SET16_A(ep->create_date, d);
+ break;
+ case TM_MODIFY:
+ SET16_A(ep->modify_time, t);
+ SET16_A(ep->modify_date, d);
+ break;
+ }
+}
+
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t, d;
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+ d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+ switch (mode) {
+ case TM_CREATE:
+ SET16_A(ep->create_time, t);
+ SET16_A(ep->create_date, d);
+ break;
+ case TM_MODIFY:
+ SET16_A(ep->modify_time, t);
+ SET16_A(ep->modify_date, d);
+ break;
+ case TM_ACCESS:
+ SET16_A(ep->access_time, t);
+ SET16_A(ep->access_date, d);
+ break;
+ }
+}
+
+s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ u32 type, u32 start_clu, u64 size)
+{
+ sector_t sector;
+ struct dos_dentry_t *dos_ep;
+
+ dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!dos_ep)
+ return FFS_MEDIAERR;
+
+ init_dos_entry(dos_ep, type, start_clu);
+ buf_modify(sb, sector);
+
+ return FFS_SUCCESS;
+}
+
+s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size)
+{
+ sector_t sector;
+ u8 flags;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+
+ flags = (type == TYPE_FILE) ? 0x01 : 0x03;
+
+ /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return FFS_MEDIAERR;
+
+ strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry+1,
+ &sector);
+ if (!strm_ep)
+ return FFS_MEDIAERR;
+
+ init_file_entry(file_ep, type);
+ buf_modify(sb, sector);
+
+ init_strm_entry(strm_ep, flags, start_clu, size);
+ buf_modify(sb, sector);
+
+ return FFS_SUCCESS;
+}
+
+static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i;
+ sector_t sector;
+ u8 chksum;
+ u16 *uniname = p_uniname->name;
+ struct dos_dentry_t *dos_ep;
+ struct ext_dentry_t *ext_ep;
+
+ dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!dos_ep)
+ return FFS_MEDIAERR;
+
+ dos_ep->lcase = p_dosname->name_case;
+ memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
+ buf_modify(sb, sector);
+
+ if ((--num_entries) > 0) {
+ chksum = calc_checksum_1byte((void *)dos_ep->name,
+ DOS_NAME_LENGTH, 0);
+
+ for (i = 1; i < num_entries; i++) {
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb,
+ p_dir,
+ entry - i,
+ &sector);
+ if (!ext_ep)
+ return FFS_MEDIAERR;
+
+ init_ext_entry(ext_ep, i, chksum, uniname);
+ buf_modify(sb, sector);
+ uniname += 13;
+ }
+
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry - i,
+ &sector);
+ if (!ext_ep)
+ return FFS_MEDIAERR;
+
+ init_ext_entry(ext_ep, i+0x40, chksum, uniname);
+ buf_modify(sb, sector);
+ }
+
+ return FFS_SUCCESS;
+}
+
+static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i;
+ sector_t sector;
+ u16 *uniname = p_uniname->name;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+ struct name_dentry_t *name_ep;
+
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return FFS_MEDIAERR;
+
+ file_ep->num_ext = (u8)(num_entries - 1);
+ buf_modify(sb, sector);
+
+ strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry+1,
+ &sector);
+ if (!strm_ep)
+ return FFS_MEDIAERR;
+
+ strm_ep->name_len = p_uniname->name_len;
+ SET16_A(strm_ep->name_hash, p_uniname->name_hash);
+ buf_modify(sb, sector);
+
+ for (i = 2; i < num_entries; i++) {
+ name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry + i,
+ &sector);
+ if (!name_ep)
+ return FFS_MEDIAERR;
+
+ init_name_entry(name_ep, uniname);
+ buf_modify(sb, sector);
+ uniname += 15;
+ }
+
+ update_dir_checksum(sb, p_dir, entry);
+
+ return FFS_SUCCESS;
+}
+
+void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu)
+{
+ struct timestamp_t tm, *tp;
+
+ fat_set_entry_type((struct dentry_t *) ep, type);
+ SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
+ SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
+ SET32_A(ep->size, 0);
+
+ tp = tm_current(&tm);
+ fat_set_entry_time((struct dentry_t *) ep, tp, TM_CREATE);
+ fat_set_entry_time((struct dentry_t *) ep, tp, TM_MODIFY);
+ SET16_A(ep->access_date, 0);
+ ep->create_time_ms = 0;
+}
+
+void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
+{
+ int i;
+ bool end = false;
+
+ fat_set_entry_type((struct dentry_t *) ep, TYPE_EXTEND);
+ ep->order = (u8) order;
+ ep->sysid = 0;
+ ep->checksum = chksum;
+ SET16_A(ep->start_clu, 0);
+
+ for (i = 0; i < 10; i += 2) {
+ if (!end) {
+ SET16(ep->unicode_0_4+i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16(ep->unicode_0_4+i, 0xFFFF);
+ }
+ }
+
+ for (i = 0; i < 12; i += 2) {
+ if (!end) {
+ SET16_A(ep->unicode_5_10 + i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16_A(ep->unicode_5_10 + i, 0xFFFF);
+ }
+ }
+
+ for (i = 0; i < 4; i += 2) {
+ if (!end) {
+ SET16_A(ep->unicode_11_12 + i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16_A(ep->unicode_11_12 + i, 0xFFFF);
+ }
+ }
+}
+
+void init_file_entry(struct file_dentry_t *ep, u32 type)
+{
+ struct timestamp_t tm, *tp;
+
+ exfat_set_entry_type((struct dentry_t *)ep, type);
+
+ tp = tm_current(&tm);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
+ ep->create_time_ms = 0;
+ ep->modify_time_ms = 0;
+ ep->access_time_ms = 0;
+}
+
+void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
+{
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
+ ep->flags = flags;
+ SET32_A(ep->start_clu, start_clu);
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
+}
+
+void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
+{
+ int i;
+
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->flags = 0x0;
+
+ for (i = 0; i < 30; i++, i++) {
+ SET16_A(ep->unicode_0_14+i, *uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
+
+void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
+{
+ int i;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = num_entries-1; i >= order; i--) {
+ ep = get_entry_in_dir(sb, p_dir, entry-i, &sector);
+ if (!ep)
+ return;
+
+ p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ buf_modify(sb, sector);
+ }
+}
+
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
+{
+ int i;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = order; i < num_entries; i++) {
+ ep = get_entry_in_dir(sb, p_dir, entry+i, &sector);
+ if (!ep)
+ return;
+
+ p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ buf_modify(sb, sector);
+ }
+}
+
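+/*
+ * Recompute the entry-set checksum: hash the file entry and all of its
+ * secondary entries (CS_DIR_ENTRY for the first, CS_DEFAULT for the rest)
+ * and store the result in the file entry's checksum field before writing
+ * it back.
+ */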
+void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry)
+{
+ int i, num_entries;
+ sector_t sector;
+ u16 chksum;
+ struct file_dentry_t *file_ep;
+ struct dentry_t *ep;
+
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return;
+
+ buf_lock(sb, sector);
+
+ num_entries = (s32) file_ep->num_ext + 1;
+ chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
+ CS_DIR_ENTRY);
+
+ for (i = 1; i < num_entries; i++) {
+ ep = get_entry_in_dir(sb, p_dir, entry+i, NULL);
+ if (!ep) {
+ buf_unlock(sb, sector);
+ return;
+ }
+
+ chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
+ CS_DEFAULT);
+ }
+
+ SET16_A(file_ep->checksum, chksum);
+ buf_modify(sb, sector);
+ buf_unlock(sb, sector);
+}
+
+void update_dir_checksum_with_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es)
+{
+ struct dentry_t *ep;
+ u16 chksum = 0;
+ s32 chksum_type = CS_DIR_ENTRY, i;
+
+ ep = (struct dentry_t *)&(es->__buf);
+ for (i = 0; i < es->num_entries; i++) {
+ pr_debug("%s ep %p\n", __func__, ep);
+ chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
+ chksum_type);
+ ep++;
+ chksum_type = CS_DEFAULT;
+ }
+
+ ep = (struct dentry_t *)&(es->__buf);
+ SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
+ write_whole_entry_set(sb, es);
+}
+
+static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
+ s32 byte_offset, u32 *clu)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ s32 clu_offset;
+ u32 cur_clu;
+
+ clu_offset = byte_offset >> p_fs->cluster_size_bits;
+ cur_clu = p_dir->dir;
+
+ if (p_dir->flags == 0x03) {
+ cur_clu += clu_offset;
+ } else {
+ while (clu_offset > 0) {
+ if (FAT_read(sb, cur_clu, &cur_clu) == -1)
+ return FFS_MEDIAERR;
+ clu_offset--;
+ }
+ }
+
+ if (clu)
+ *clu = cur_clu;
+ return FFS_SUCCESS;
+}
+
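+/*
+ * Translate a directory-entry index into an absolute (sector, byte offset)
+ * location, either inside the fixed FAT16 root directory area or by walking
+ * the directory's cluster chain.
+ */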
+s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ sector_t *sector, s32 *offset)
+{
+ s32 off, ret;
+ u32 clu = 0;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ off = entry << DENTRY_SIZE_BITS;
+
+ if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */
+ *offset = off & p_bd->sector_size_mask;
+ *sector = off >> p_bd->sector_size_bits;
+ *sector += p_fs->root_start_sector;
+ } else {
+ ret = _walk_fat_chain(sb, p_dir, off, &clu);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ /* byte offset in cluster */
+ off &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ *offset = off & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ *sector = off >> p_bd->sector_size_bits;
+ *sector += START_SECTOR(clu);
+ }
+ return FFS_SUCCESS;
+}
+
+struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
+ s32 offset)
+{
+ u8 *buf;
+
+ buf = buf_getblk(sb, sector);
+
+ if (buf == NULL)
+ return NULL;
+
+ return (struct dentry_t *)(buf + offset);
+}
+
+struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, sector_t *sector)
+{
+ s32 off;
+ sector_t sec;
+ u8 *buf;
+
+ if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS)
+ return NULL;
+
+ buf = buf_getblk(sb, sec);
+
+ if (buf == NULL)
+ return NULL;
+
+ if (sector != NULL)
+ *sector = sec;
+ return (struct dentry_t *)(buf + off);
+}
+
+/* Returns the set of dentries describing a file or directory.
+ * Note that this is a copy (dump) of the on-disk dentries, so the caller must
+ * call write_whole_entry_set() (or write_partial_entries_in_entry_set()) to
+ * apply changes made to this entry set back to the device.
+ * in:
+ * sb+p_dir+entry: indicates a file/dir
+ * type: specifies how many dentries should be included.
+ * out:
+ * file_ep: points to the first dentry (= file dentry) on success
+ * return:
+ * pointer to the entry set on success,
+ * NULL on failure.
+ */
+
+#define ES_MODE_STARTED 0
+#define ES_MODE_GET_FILE_ENTRY 1
+#define ES_MODE_GET_STRM_ENTRY 2
+#define ES_MODE_GET_NAME_ENTRY 3
+#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4
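+/*
+ * The ES_MODE_* values drive a small state machine that checks the entries
+ * copied below really form a valid set: a file/dir entry, then a stream
+ * entry, then one or more name entries, optionally followed by further
+ * critical secondary entries.
+ */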
+struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u32 type,
+ struct dentry_t **file_ep)
+{
+ s32 off, ret, byte_offset;
+ u32 clu = 0;
+ sector_t sec;
+ u32 entry_type;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct entry_set_cache_t *es = NULL;
+ struct dentry_t *ep, *pos;
+ u8 *buf;
+ u8 num_entries;
+ s32 mode = ES_MODE_STARTED;
+ size_t bufsize;
+
+ pr_debug("%s entered p_dir dir %u flags %x size %d\n",
+ __func__, p_dir->dir, p_dir->flags, p_dir->size);
+
+ byte_offset = entry << DENTRY_SIZE_BITS;
+ ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
+ if (ret != FFS_SUCCESS)
+ return NULL;
+
+ /* byte offset in cluster */
+ byte_offset &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ off = byte_offset & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ sec = byte_offset >> p_bd->sector_size_bits;
+ sec += START_SECTOR(clu);
+
+ buf = buf_getblk(sb, sec);
+ if (buf == NULL)
+ goto err_out;
+
+ ep = (struct dentry_t *)(buf + off);
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type != TYPE_FILE)
+ && (entry_type != TYPE_DIR))
+ goto err_out;
+
+ if (type == ES_ALL_ENTRIES)
+ num_entries = ((struct file_dentry_t *)ep)->num_ext+1;
+ else
+ num_entries = type;
+
+ bufsize = offsetof(struct entry_set_cache_t, __buf) + (num_entries) *
+ sizeof(struct dentry_t);
+ pr_debug("%s: trying to kmalloc %zx bytes for %d entries\n", __func__,
+ bufsize, num_entries);
+ es = kmalloc(bufsize, GFP_KERNEL);
+ if (es == NULL)
+ goto err_out;
+
+ es->num_entries = num_entries;
+ es->sector = sec;
+ es->offset = off;
+ es->alloc_flag = p_dir->flags;
+
+ pos = (struct dentry_t *) &(es->__buf);
+
+ while (num_entries) {
+ /*
+ * Instead of copying the whole sector, we validate every entry.
+ * This provides a minimum of stability and consistency.
+ */
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
+ goto err_out;
+
+ switch (mode) {
+ case ES_MODE_STARTED:
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR))
+ mode = ES_MODE_GET_FILE_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_FILE_ENTRY:
+ if (entry_type == TYPE_STREAM)
+ mode = ES_MODE_GET_STRM_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_STRM_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ mode = ES_MODE_GET_NAME_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_NAME_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ break;
+ else if (entry_type == TYPE_STREAM)
+ goto err_out;
+ else if (entry_type & TYPE_CRITICAL_SEC)
+ mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_CRITICAL_SEC_ENTRY:
+ if ((entry_type == TYPE_EXTEND) ||
+ (entry_type == TYPE_STREAM))
+ goto err_out;
+ else if ((entry_type & TYPE_CRITICAL_SEC) !=
+ TYPE_CRITICAL_SEC)
+ goto err_out;
+ break;
+ }
+
+ memcpy(pos, ep, sizeof(struct dentry_t));
+
+ if (--num_entries == 0)
+ break;
+
+ if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) <
+ (off & p_bd->sector_size_mask)) {
+ /* get the next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (FAT_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ buf = buf_getblk(sb, sec);
+ if (buf == NULL)
+ goto err_out;
+ off = 0;
+ ep = (struct dentry_t *)(buf);
+ } else {
+ ep++;
+ off += DENTRY_SIZE;
+ }
+ pos++;
+ }
+
+ if (file_ep)
+ *file_ep = (struct dentry_t *)&(es->__buf);
+
+ pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
+ __func__, es, (unsigned long long)es->sector, es->offset,
+ es->alloc_flag, es->num_entries, &es->__buf);
+ return es;
+err_out:
+ pr_debug("%s exited NULL (es %p)\n", __func__, es);
+ kfree(es);
+ return NULL;
+}
+
+void release_entry_set(struct entry_set_cache_t *es)
+{
+ pr_debug("%s es=%p\n", __func__, es);
+ kfree(es);
+}
+
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ sector_t sec, s32 off, u32 count)
+{
+ s32 num_entries, buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&(es->__buf);
+
+ pr_debug("%s entered es %p sec %llu off %d count %d\n",
+ __func__, es, (unsigned long long)sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+		/* write on a per-sector basis */
+ remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
+ copy_entries = min_t(s32,
+ remaining_byte_in_sector >> DENTRY_SIZE_BITS,
+ num_entries);
+ buf = buf_getblk(sb, sec);
+ if (buf == NULL)
+ goto err_out;
+ pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
+ pr_debug("copying %d entries from %p to sector %llu\n",
+ copy_entries, (esbuf + buf_off),
+ (unsigned long long)sec);
+ memcpy(buf + off, esbuf + buf_off,
+ copy_entries << DENTRY_SIZE_BITS);
+ buf_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ clu = GET_CLUSTER_FROM_SECTOR(sec);
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (FAT_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ pr_debug("%s exited successfully\n", __func__);
+ return FFS_SUCCESS;
+err_out:
+ pr_debug("%s failed\n", __func__);
+ return FFS_ERROR;
+}
+
+/* write back all entries in entry set */
+s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector,
+ es->offset,
+ es->num_entries);
+}
+
+/* write back some entries in entry set */
+s32 write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es, struct dentry_t *ep, u32 count)
+{
+ s32 ret, byte_offset, off;
+ u32 clu = 0;
+ sector_t sec;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct chain_t dir;
+
+	/* validity check */
+ if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries)
+ return FFS_ERROR;
+
+ dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector);
+ dir.flags = es->alloc_flag;
+ dir.size = 0xffffffff; /* XXX */
+
+ byte_offset = (es->sector - START_SECTOR(dir.dir)) <<
+ p_bd->sector_size_bits;
+ byte_offset += ((void **)ep - &(es->__buf)) + es->offset;
+
+ ret = _walk_fat_chain(sb, &dir, byte_offset, &clu);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ /* byte offset in cluster */
+ byte_offset &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ off = byte_offset & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ sec = byte_offset >> p_bd->sector_size_bits;
+ sec += START_SECTOR(clu);
+ return __write_partial_entries_in_entry_set(sb, es, sec, off, count);
+}
+
+/* search EMPTY CONTINUOUS "num_entries" entries */
+s32 search_deleted_or_unused_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 num_entries)
+{
+ int i, dentry, num_empty = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
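+	/*
+	 * Resume from the cached unused-entry hint if it refers to this
+	 * directory; otherwise scan from the first entry.
+	 */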
+ if (p_fs->hint_uentry.dir == p_dir->dir) {
+ if (p_fs->hint_uentry.entry == -1)
+ return -1;
+
+ clu.dir = p_fs->hint_uentry.clu.dir;
+ clu.size = p_fs->hint_uentry.clu.size;
+ clu.flags = p_fs->hint_uentry.clu.flags;
+
+ dentry = p_fs->hint_uentry.entry;
+ } else {
+ p_fs->hint_uentry.entry = -1;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ dentry = 0;
+ }
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ i = dentry % dentries_per_clu;
+ else
+			i = dentry & (dentries_per_clu - 1);
+
+ for (; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -1;
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED) {
+ num_empty++;
+ if (p_fs->hint_uentry.entry == -1) {
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = dentry;
+
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = clu.size;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ } else if (type == TYPE_DELETED) {
+ num_empty++;
+ } else {
+ num_empty = 0;
+ }
+
+ if (num_empty >= num_entries) {
+ p_fs->hint_uentry.dir = CLUSTER_32(~0);
+ p_fs->hint_uentry.entry = -1;
+
+ if (p_fs->vol_type == EXFAT)
+					return dentry - (num_entries - 1);
+ else
+ return dentry;
+ }
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
+{
+ s32 ret, dentry;
+ u32 last_clu;
+ sector_t sector;
+ u64 size = 0;
+ struct chain_t clu;
+ struct dentry_t *ep = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ return search_deleted_or_unused_entry(sb, p_dir, num_entries);
+
+ while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (p_fs->vol_type == EXFAT) {
+ if (p_dir->dir != p_fs->root_dir)
+ size = i_size_read(inode);
+ }
+
+ last_clu = find_last_cluster(sb, p_dir);
+ clu.dir = last_clu + 1;
+ clu.size = 0;
+ clu.flags = p_dir->flags;
+
+ /* (1) allocate a cluster */
+ ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
+ if (ret < 1)
+ return -1;
+
+ if (clear_cluster(sb, clu.dir) != FFS_SUCCESS)
+ return -1;
+
+ /* (2) append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size);
+ p_dir->flags = 0x01;
+ p_fs->hint_uentry.clu.flags = 0x01;
+ }
+ if (clu.flags == 0x01)
+ if (FAT_write(sb, last_clu, clu.dir) < 0)
+ return -1;
+
+ if (p_fs->hint_uentry.entry == -1) {
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS);
+
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = 0;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ p_fs->hint_uentry.clu.size++;
+ p_dir->size++;
+
+ /* (3) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ if (p_dir->dir != p_fs->root_dir) {
+ size += p_fs->cluster_size;
+
+ ep = get_entry_in_dir(sb, &fid->dir,
+ fid->entry + 1, &sector);
+ if (!ep)
+ return -1;
+ p_fs->fs_func->set_entry_size(ep, size);
+ p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ buf_modify(sb, sector);
+
+ update_dir_checksum(sb, &(fid->dir),
+ fid->entry);
+ }
+ }
+
+		i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
+ EXFAT_I(inode)->mmu_private += p_fs->cluster_size;
+ EXFAT_I(inode)->fid.size += p_fs->cluster_size;
+ EXFAT_I(inode)->fid.flags = p_dir->flags;
+ inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9);
+ }
+
+ return dentry;
+}
+
+/* return values of fat_find_dir_entry()
+ * >= 0 : the position of the directory entry with the given name
+ * -1 : the name is "." or ".." and p_dir is the root directory itself
+ * -2 : entry with the name does not exist
+ */
+s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type)
+{
+ int i, dentry = 0, len;
+ s32 order = 0;
+ bool is_feasible_entry = true, has_ext_entry = false;
+ s32 dentries_per_clu;
+ u32 entry_type;
+ u16 entry_uniname[14], *uniname = NULL, unichar;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct dos_dentry_t *dos_ep;
+ struct ext_dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == p_fs->root_dir) {
+ if ((!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_CUR_DIR_NAME)) ||
+ (!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_PAR_DIR_NAME)))
+ return -1; // special case, root directory itself
+ }
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ if (is_feasible_entry && has_ext_entry)
+ return dentry;
+
+ dos_ep = (struct dos_dentry_t *) ep;
+ if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name))
+ return dentry;
+ }
+ is_feasible_entry = true;
+ has_ext_entry = false;
+ } else if (entry_type == TYPE_EXTEND) {
+ if (is_feasible_entry) {
+ ext_ep = (struct ext_dentry_t *) ep;
+ if (ext_ep->order > 0x40) {
+ order = (s32)(ext_ep->order - 0x40);
+					uniname = p_uniname->name + 13 * (order - 1);
+ } else {
+ order = (s32) ext_ep->order;
+ uniname -= 13;
+ }
+
+ len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
+
+ unichar = *(uniname+len);
+ *(uniname+len) = 0x0;
+
+ if (nls_uniname_cmp(sb, uniname, entry_uniname))
+ is_feasible_entry = false;
+
+ *(uniname+len) = unichar;
+ }
+ has_ext_entry = true;
+ } else if (entry_type == TYPE_UNUSED) {
+ return -2;
+ }
+ is_feasible_entry = true;
+ has_ext_entry = false;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -2;
+ }
+
+ return -2;
+}
+
+/* return values of exfat_find_dir_entry()
+ * >= 0 : the position of the directory entry with the given name
+ * -1 : the name is "." or ".." and p_dir is the root directory itself
+ * -2 : entry with the name does not exist
+ */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type)
+{
+ int i = 0, dentry = 0, num_ext_entries = 0, len, step;
+ s32 order = 0;
+ bool is_feasible_entry = false;
+ s32 dentries_per_clu, num_empty = 0;
+ u32 entry_type;
+ u16 entry_uniname[16], *uniname = NULL, unichar;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+ struct name_dentry_t *name_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == p_fs->root_dir) {
+ if ((!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_CUR_DIR_NAME)) ||
+ (!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_PAR_DIR_NAME)))
+ return -1; // special case, root directory itself
+ }
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = -1;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ while (i < dentries_per_clu) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+ step = 1;
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
+ is_feasible_entry = false;
+
+ if (p_fs->hint_uentry.entry == -1) {
+ num_empty++;
+
+ if (num_empty == 1) {
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = clu.size;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED))
+						p_fs->hint_uentry.entry = dentry - (num_empty - 1);
+ }
+
+ if (entry_type == TYPE_UNUSED)
+ return -2;
+ } else {
+ num_empty = 0;
+
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ file_ep = (struct file_dentry_t *) ep;
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ num_ext_entries = file_ep->num_ext;
+ is_feasible_entry = true;
+ } else {
+ is_feasible_entry = false;
+ step = file_ep->num_ext + 1;
+ }
+ } else if (entry_type == TYPE_STREAM) {
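+				/*
+				 * The stream entry caches a hash of the up-cased
+				 * name; a hash or length mismatch lets us skip
+				 * the name entries of this set.
+				 */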
+ if (is_feasible_entry) {
+ strm_ep = (struct strm_dentry_t *)ep;
+ if (p_uniname->name_hash == GET16_A(strm_ep->name_hash) &&
+ p_uniname->name_len == strm_ep->name_len) {
+ order = 1;
+ } else {
+ is_feasible_entry = false;
+ step = num_ext_entries;
+ }
+ }
+ } else if (entry_type == TYPE_EXTEND) {
+ if (is_feasible_entry) {
+ name_ep = (struct name_dentry_t *)ep;
+
+ if ((++order) == 2)
+ uniname = p_uniname->name;
+ else
+ uniname += 15;
+
+ len = extract_uni_name_from_name_entry(name_ep,
+ entry_uniname, order);
+
+ unichar = *(uniname+len);
+ *(uniname+len) = 0x0;
+
+ if (nls_uniname_cmp(sb, uniname, entry_uniname)) {
+ is_feasible_entry = false;
+ step = num_ext_entries - order + 1;
+ } else if (order == num_ext_entries) {
+ p_fs->hint_uentry.dir = CLUSTER_32(~0);
+ p_fs->hint_uentry.entry = -1;
+ return dentry - (num_ext_entries);
+ }
+
+ *(uniname+len) = unichar;
+ }
+ } else {
+ is_feasible_entry = false;
+ }
+ }
+
+ i += step;
+ dentry += step;
+ }
+
+ i -= dentries_per_clu;
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -2;
+ }
+ }
+
+ return -2;
+}
+
+s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry)
+{
+ s32 count = 0;
+ u8 chksum;
+ struct dos_dentry_t *dos_ep = (struct dos_dentry_t *) p_entry;
+ struct ext_dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ chksum = calc_checksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0);
+
+ for (entry--; entry >= 0; entry--) {
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry, NULL);
+ if (!ext_ep)
+ return -1;
+
+ if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) ==
+ TYPE_EXTEND) && (ext_ep->checksum == chksum)) {
+ count++;
+ if (ext_ep->order > 0x40)
+ return count;
+ } else {
+ return count;
+ }
+ }
+
+ return count;
+}
+
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry)
+{
+ int i, count = 0;
+ u32 type;
+ struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
+ struct dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
+ ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
+ if (!ext_ep)
+ return -1;
+
+ type = p_fs->fs_func->get_entry_type(ext_ep);
+ if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
+ count++;
+ else
+ return count;
+ }
+
+ return count;
+}
+
+s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
+ u32 type)
+{
+ int i, count = 0;
+ s32 dentries_per_clu;
+ u32 entry_type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -1;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if (entry_type == TYPE_UNUSED)
+ return count;
+ if (!(type & TYPE_CRITICAL_PRI) &&
+ !(type & TYPE_BENIGN_PRI))
+ continue;
+
+ if ((type == TYPE_ALL) || (type == entry_type))
+ count++;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -1;
+ }
+ }
+
+ return count;
+}
+
+bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
+{
+ int i, count = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ break;
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED)
+ return true;
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ return false;
+
+ if (p_fs->vol_type == EXFAT)
+ return false;
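+			/*
+			 * On FAT, "." and ".." occupy the first two slots of a
+			 * non-root directory; any further FILE/DIR entry means
+			 * the directory is not empty.
+			 */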
+ if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2))
+ return false;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ }
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * Name Conversion Functions
+ */
+
+/* input : dir, uni_name
+ * output : num_of_entry, dos_name (format: aaaaaa~1.bbb)
+ */
+s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 *entries,
+ struct dos_name_t *p_dosname)
+{
+ s32 ret, num_entries;
+ bool lossy = false;
+ char **r;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+ if (num_entries == 0)
+ return FFS_INVALIDPATH;
+
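+	/*
+	 * FAT12/16/32 volumes need a DOS 8.3 alias: if the Unicode name does
+	 * not convert losslessly, generate a "NAME~N" short name instead.
+	 */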
+ if (p_fs->vol_type != EXFAT) {
+ nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
+
+ if (lossy) {
+ ret = fat_generate_dos_name(sb, p_dir, p_dosname);
+ if (ret)
+ return ret;
+ } else {
+ for (r = reserved_names; *r; r++) {
+ if (!strncmp((void *)p_dosname->name, *r, 8))
+ return FFS_INVALIDPATH;
+ }
+
+ if (p_dosname->name_case != 0xFF)
+ num_entries = 1;
+ }
+
+ if (num_entries > 1)
+ p_dosname->name_case = 0x0;
+ }
+
+ *entries = num_entries;
+
+ return FFS_SUCCESS;
+}
+
+void get_uni_name_from_dos_entry(struct super_block *sb,
+ struct dos_dentry_t *ep,
+ struct uni_name_t *p_uniname, u8 mode)
+{
+ struct dos_name_t dos_name;
+
+ if (mode == 0x0)
+ dos_name.name_case = 0x0;
+ else
+ dos_name.name_case = ep->lcase;
+
+ memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
+ nls_dosname_to_uniname(sb, p_uniname, &dos_name);
+}
+
+void fat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname)
+{
+ int i;
+ struct ext_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (entry--, i = 1; entry >= 0; entry--, i++) {
+ ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ NULL);
+ if (!ep)
+ return;
+
+ if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) ==
+ TYPE_EXTEND) {
+ extract_uni_name_from_ext_entry(ep, uniname, i);
+ if (ep->order > 0x40)
+ return;
+ } else {
+ return;
+ }
+
+ uniname += 13;
+ }
+}
+
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname)
+{
+ int i;
+ struct dentry_t *ep;
+ struct entry_set_cache_t *es;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ if (es == NULL || es->num_entries < 3) {
+ if (es)
+ release_entry_set(es);
+ return;
+ }
+
+ ep += 2;
+
+ /*
+ * First entry : file entry
+ * Second entry : stream-extension entry
+ * Third entry : first file-name entry
+ * So, the index of first file-name dentry should start from 2.
+ */
+ for (i = 2; i < es->num_entries; i++, ep++) {
+ if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND)
+ extract_uni_name_from_name_entry((struct name_dentry_t *)
+ ep, uniname, i);
+ else
+ goto out;
+ uniname += 15;
+ }
+
+out:
+ release_entry_set(es);
+}
+
+s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname,
+ s32 order)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 10; i += 2) {
+ *uniname = GET16(ep->unicode_0_4 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
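+	/*
+	 * An order of 20 can only be the last extension entry of a
+	 * maximum-length name; copy just enough characters to stay within
+	 * MAX_NAME_LENGTH.
+	 */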
+ if (order < 20) {
+ for (i = 0; i < 12; i += 2) {
+ *uniname = GET16_A(ep->unicode_5_10 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+ } else {
+ for (i = 0; i < 8; i += 2) {
+ *uniname = GET16_A(ep->unicode_5_10 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+ *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */
+ return len;
+ }
+
+ for (i = 0; i < 4; i += 2) {
+ *uniname = GET16_A(ep->unicode_11_12 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ *uniname = 0x0;
+ return len;
+}
+
+s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
+ s32 order)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 30; i += 2) {
+ *uniname = GET16_A(ep->unicode_0_14 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ *uniname = 0x0;
+ return len;
+}
+
+s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct dos_name_t *p_dosname)
+{
+ int i, j, count = 0;
+ bool count_begin = false;
+ s32 dentries_per_clu;
+ u32 type;
+	u8 bmap[128];		/* 1 ~ 1023 */
+ struct chain_t clu;
+ struct dos_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
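+	/*
+	 * bmap records which "~N" tails (N = 1..1023) are already used in this
+	 * directory; bit 0 is pre-set so the search below never returns 0.
+	 */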
+ memset(bmap, 0, sizeof(bmap));
+ exfat_bitmap_set(bmap, 0);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)
+ ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ count = 0;
+ count_begin = false;
+
+ for (j = 0; j < 8; j++) {
+ if (ep->name[j] == ' ')
+ break;
+
+ if (ep->name[j] == '~') {
+ count_begin = true;
+ } else if (count_begin) {
+ if ((ep->name[j] >= '0') &&
+ (ep->name[j] <= '9')) {
+ count = count * 10 +
+ (ep->name[j] - '0');
+ } else {
+ count = 0;
+ count_begin = false;
+ }
+ }
+ }
+
+ if ((count > 0) && (count < 1024))
+ exfat_bitmap_set(bmap, count);
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+
+ count = 0;
+ for (i = 0; i < 128; i++) {
+ if (bmap[i] != 0xFF) {
+ for (j = 0; j < 8; j++) {
+ if (exfat_bitmap_test(&bmap[i], j) == 0) {
+ count = (i << 3) + j;
+ break;
+ }
+ }
+ if (count != 0)
+ break;
+ }
+ }
+
+ if ((count == 0) || (count >= 1024))
+ return FFS_FILEEXIST;
+ fat_attach_count_to_dos_name(p_dosname->name, count);
+
+ /* Now dos_name has DOS~????.EXT */
+ return FFS_SUCCESS;
+}
+
+void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
+{
+ int i, j, length;
+ char str_count[6];
+
+ snprintf(str_count, sizeof(str_count), "~%d", count);
+ length = strlen(str_count);
+
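+	/*
+	 * Find where to splice "~N" into the 8-character base name, stepping
+	 * over double-byte characters (lead bytes have the high bit set).
+	 */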
+ i = 0;
+ j = 0;
+ while (j <= (8 - length)) {
+ i = j;
+ if (dosname[j] == ' ')
+ break;
+ if (dosname[j] & 0x80)
+ j += 2;
+ else
+ j++;
+ }
+
+ for (j = 0; j < length; i++, j++)
+ dosname[i] = (u8)str_count[j];
+
+ if (i == 7)
+ dosname[7] = ' ';
+}
+
+s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 dos name entry + extended entries */
+ return (len - 1) / 13 + 2;
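+	/* each extension entry holds up to 13 UTF-16 characters */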
+}
+
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 file entry + 1 stream entry + name entries */
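+	/* each file-name entry holds up to 15 UTF-16 characters */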
+ return (len - 1) / 15 + 3;
+}
+
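+/*
+ * Rotate-right-by-one-and-add checksum over the DOS 8.3 name, as stored in
+ * the checksum field of FAT long-file-name extension entries.
+ */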
+u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
+
+ return chksum;
+}
+
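+/*
+ * 16-bit rotate-and-add checksum; for CS_DIR_ENTRY the bytes at offsets 2
+ * and 3 are skipped because they hold the SetChecksum field itself.
+ */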
+u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_DIR_ENTRY:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 2) || (i == 3))
+ continue;
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+ break;
+	default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+
+ return chksum;
+}
+
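+/*
+ * 32-bit rotate-and-add checksum; for CS_PBR_SECTOR the VolumeFlags bytes
+ * (offsets 106-107) and PercentInUse (offset 112) are excluded, as they may
+ * change after the boot checksum is computed.
+ */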
+u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_PBR_SECTOR:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 106) || (i == 107) || (i == 112))
+ continue;
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+ break;
+	default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+
+ return chksum;
+}
+
+/*
+ * Name Resolution Functions
+ */
+
+/* return values of resolve_path()
+ * > 0 : return the length of the path
+ * < 0 : return error
+ */
+s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname)
+{
+ bool lossy = false;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ if (strlen(path) >= (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+ return FFS_INVALIDPATH;
+
+ strcpy(name_buf, path);
+
+ nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
+ if (lossy)
+ return FFS_INVALIDPATH;
+
+ fid->size = i_size_read(inode);
+
+ p_dir->dir = fid->start_clu;
+ p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
+ p_dir->flags = fid->flags;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * File Operation Functions
+ */
+static struct fs_func fat_fs_func = {
+ .alloc_cluster = fat_alloc_cluster,
+ .free_cluster = fat_free_cluster,
+ .count_used_clusters = fat_count_used_clusters,
+
+ .init_dir_entry = fat_init_dir_entry,
+ .init_ext_entry = fat_init_ext_entry,
+ .find_dir_entry = fat_find_dir_entry,
+ .delete_dir_entry = fat_delete_dir_entry,
+ .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry,
+ .count_ext_entries = fat_count_ext_entries,
+ .calc_num_entries = fat_calc_num_entries,
+
+ .get_entry_type = fat_get_entry_type,
+ .set_entry_type = fat_set_entry_type,
+ .get_entry_attr = fat_get_entry_attr,
+ .set_entry_attr = fat_set_entry_attr,
+ .get_entry_flag = fat_get_entry_flag,
+ .set_entry_flag = fat_set_entry_flag,
+ .get_entry_clu0 = fat_get_entry_clu0,
+ .set_entry_clu0 = fat_set_entry_clu0,
+ .get_entry_size = fat_get_entry_size,
+ .set_entry_size = fat_set_entry_size,
+ .get_entry_time = fat_get_entry_time,
+ .set_entry_time = fat_set_entry_time,
+};
+
+s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ s32 num_reserved, num_root_sectors;
+ struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
+ num_root_sectors = ((num_root_sectors - 1) >>
+ p_bd->sector_size_bits) + 1;
+
+ p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
+ p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->FAT2_start_sector +
+ p_fs->num_FAT_sectors;
+ p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors;
+
+ p_fs->num_sectors = GET16(p_bpb->num_sectors);
+ if (p_fs->num_sectors == 0)
+ p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
+
+ num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
+ p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
+ p_fs->sectors_per_clu_bits) + 2;
+ /* because the cluster index starts with 2 */
+
+ if (p_fs->num_clusters < FAT12_THRESHOLD)
+ p_fs->vol_type = FAT12;
+ else
+ p_fs->vol_type = FAT16;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = 0;
+ p_fs->dentries_in_root = GET16(p_bpb->num_root_entries);
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = VOL_CLEAN;
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = (u32)~0;
+
+ p_fs->fs_func = &fat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ s32 num_reserved;
+ struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
+ p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->FAT2_start_sector +
+ p_fs->num_FAT_sectors;
+ p_fs->data_start_sector = p_fs->root_start_sector;
+
+ p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
+ num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
+
+ p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
+ p_fs->sectors_per_clu_bits) + 2;
+ /* because the cluster index starts with 2 */
+
+ p_fs->vol_type = FAT32;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = GET32(p_bpb->root_cluster);
+ p_fs->dentries_in_root = 0;
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = VOL_CLEAN;
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = (u32)~0;
+
+ p_fs->fs_func = &fat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+static struct fs_func exfat_fs_func = {
+ .alloc_cluster = exfat_alloc_cluster,
+ .free_cluster = exfat_free_cluster,
+ .count_used_clusters = exfat_count_used_clusters,
+
+ .init_dir_entry = exfat_init_dir_entry,
+ .init_ext_entry = exfat_init_ext_entry,
+ .find_dir_entry = exfat_find_dir_entry,
+ .delete_dir_entry = exfat_delete_dir_entry,
+ .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry,
+ .count_ext_entries = exfat_count_ext_entries,
+ .calc_num_entries = exfat_calc_num_entries,
+
+ .get_entry_type = exfat_get_entry_type,
+ .set_entry_type = exfat_set_entry_type,
+ .get_entry_attr = exfat_get_entry_attr,
+ .set_entry_attr = exfat_set_entry_attr,
+ .get_entry_flag = exfat_get_entry_flag,
+ .set_entry_flag = exfat_set_entry_flag,
+ .get_entry_clu0 = exfat_get_entry_clu0,
+ .set_entry_clu0 = exfat_set_entry_clu0,
+ .get_entry_size = exfat_get_entry_size,
+ .set_entry_size = exfat_set_entry_size,
+ .get_entry_time = exfat_get_entry_time,
+ .set_entry_time = exfat_set_entry_time,
+};
+
+s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
+ p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET32(p_bpb->fat_length);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset);
+ p_fs->data_start_sector = p_fs->root_start_sector;
+
+ p_fs->num_sectors = GET64(p_bpb->vol_length);
+ p_fs->num_clusters = GET32(p_bpb->clu_count) + 2;
+ /* because the cluster index starts with 2 */
+
+ p_fs->vol_type = EXFAT;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = GET32(p_bpb->root_cluster);
+ p_fs->dentries_in_root = 0;
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = (u32)GET16(p_bpb->vol_flags);
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = (u32)~0;
+
+ p_fs->fs_func = &exfat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+s32 create_dir(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, struct file_id_t *fid)
+{
+ s32 ret, dentry, num_entries;
+ u64 size;
+ struct chain_t clu;
+ struct dos_name_t dos_name, dot_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
+ &dos_name);
+ if (ret)
+ return ret;
+
+ /* find_empty_entry must be called before alloc_cluster */
+ dentry = find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0)
+ return FFS_FULL;
+
+ clu.dir = CLUSTER_32(~0);
+ clu.size = 0;
+ clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+ /* (1) allocate a cluster */
+ ret = fs_func->alloc_cluster(sb, 1, &clu);
+ if (ret < 0)
+ return FFS_MEDIAERR;
+ else if (ret == 0)
+ return FFS_FULL;
+
+ ret = clear_cluster(sb, clu.dir);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ if (p_fs->vol_type == EXFAT) {
+ size = p_fs->cluster_size;
+ } else {
+ size = 0;
+
+ /* initialize the . and .. entry
+ * Information for . points to itself
+ * Information for .. points to parent dir
+ */
+
+ dot_name.name_case = 0x0;
+ memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
+
+ ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir,
+ 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
+
+ if (p_dir->dir == p_fs->root_dir)
+ ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
+ CLUSTER_32(0), 0);
+ else
+ ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
+ p_dir->dir, 0);
+
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL,
+ &dot_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+ }
+
+ /* (2) update the directory entry */
+ /* make sub-dir entry in parent directory */
+ ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
+ size);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fid->dir.dir = p_dir->dir;
+ fid->dir.size = p_dir->size;
+ fid->dir.flags = p_dir->flags;
+ fid->entry = dentry;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->size = size;
+ fid->start_clu = clu.dir;
+
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ return FFS_SUCCESS;
+}
+
+s32 create_file(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid)
+{
+ s32 ret, dentry, num_entries;
+ struct dos_name_t dos_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
+ &dos_name);
+ if (ret)
+ return ret;
+
+ /* find_empty_entry must be called before alloc_cluster() */
+ dentry = find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0)
+ return FFS_FULL;
+
+ /* (1) update the directory entry */
+ /* fill the dos name directory entry information of the created file.
+	 * the first cluster is not determined yet, so it is set to 0.
+ */
+ ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
+ CLUSTER_32(0), 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fid->dir.dir = p_dir->dir;
+ fid->dir.size = p_dir->size;
+ fid->dir.flags = p_dir->flags;
+ fid->entry = dentry;
+
+ fid->attr = ATTR_ARCHIVE | mode;
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+
+ fid->type = TYPE_FILE;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ return FFS_SUCCESS;
+}
+
+void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
+{
+ s32 num_entries;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ep = get_entry_in_dir(sb, p_dir, entry, &sector);
+ if (!ep)
+ return;
+
+ buf_lock(sb, sector);
+
+	/* buf_lock() must be called before count_ext_entries() */
+ num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
+ if (num_entries < 0) {
+ buf_unlock(sb, sector);
+ return;
+ }
+ num_entries++;
+
+ buf_unlock(sb, sector);
+
+ /* (1) update the directory entry */
+ fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
+}
+
+s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid)
+{
+ s32 ret, newentry = -1, num_old_entries, num_new_entries;
+ sector_t sector_old, sector_new;
+ struct dos_name_t dos_name;
+ struct dentry_t *epold, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
+ if (!epold)
+ return FFS_MEDIAERR;
+
+ buf_lock(sb, sector_old);
+
+	/* buf_lock() must be called before count_ext_entries() */
+ num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
+ epold);
+ if (num_old_entries < 0) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+ num_old_entries++;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
+ &num_new_entries, &dos_name);
+ if (ret) {
+ buf_unlock(sb, sector_old);
+ return ret;
+ }
+
+ if (num_old_entries < num_new_entries) {
+ newentry = find_empty_entry(inode, p_dir, num_new_entries);
+ if (newentry < 0) {
+ buf_unlock(sb, sector_old);
+ return FFS_FULL;
+ }
+
+ epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
+ if (!epnew) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
+ fs_func->set_entry_attr(epnew,
+ fs_func->get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_old);
+
+ if (p_fs->vol_type == EXFAT) {
+ epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
+ &sector_old);
+ buf_lock(sb, sector_old);
+ epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
+ &sector_new);
+
+ if (!epold || !epnew) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_old);
+ }
+
+ ret = fs_func->init_ext_entry(sb, p_dir, newentry,
+ num_new_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
+ num_old_entries);
+ fid->entry = newentry;
+ } else {
+ if (fs_func->get_entry_type(epold) == TYPE_FILE) {
+ fs_func->set_entry_attr(epold,
+ fs_func->get_entry_attr(epold) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_old);
+ buf_unlock(sb, sector_old);
+
+ ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
+ num_new_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+ num_old_entries);
+ }
+
+ return FFS_SUCCESS;
+}
+
+s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
+ struct chain_t *p_newdir, struct uni_name_t *p_uniname,
+ struct file_id_t *fid)
+{
+ s32 ret, newentry, num_new_entries, num_old_entries;
+ sector_t sector_mov, sector_new;
+ struct chain_t clu;
+ struct dos_name_t dos_name;
+ struct dentry_t *epmov, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
+ if (!epmov)
+ return FFS_MEDIAERR;
+
+	/* check if the source and target directories are the same */
+ if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
+ fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+ return FFS_INVALIDPATH;
+
+ buf_lock(sb, sector_mov);
+
+	/* buf_lock() must be called before count_ext_entries() */
+ num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
+ if (num_old_entries < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+ num_old_entries++;
+
+ ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
+ &num_new_entries, &dos_name);
+ if (ret) {
+ buf_unlock(sb, sector_mov);
+ return ret;
+ }
+
+ newentry = find_empty_entry(inode, p_newdir, num_new_entries);
+ if (newentry < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_FULL;
+ }
+
+ epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
+ if (!epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
+ fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+
+ if (p_fs->vol_type == EXFAT) {
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+ } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
+ /* change ".." pointer to new parent dir */
+ clu.dir = fs_func->get_entry_clu0(epnew);
+ clu.flags = 0x01;
+
+ epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
+ if (!epnew)
+ return FFS_MEDIAERR;
+
+ if (p_newdir->dir == p_fs->root_dir)
+ fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
+ else
+ fs_func->set_entry_clu0(epnew, p_newdir->dir);
+ buf_modify(sb, sector_new);
+ }
+
+ ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+ p_uniname, &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+
+ fid->dir.dir = p_newdir->dir;
+ fid->dir.size = p_newdir->size;
+ fid->dir.flags = p_newdir->flags;
+
+ fid->entry = newentry;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * Sector Read/Write Functions
+ */
+
+int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
+ bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, 1, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
+ bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, 1, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int multi_sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, s32 num_secs, bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int multi_sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, s32 num_secs, bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
new file mode 100644
index 000000000000..03cb8290b5d2
--- /dev/null
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include "exfat.h"
+
+static u16 bad_dos_chars[] = {
+ /* + , ; = [ ] */
+ 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
+ 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
+ 0
+};
+
+static u16 bad_uni_chars[] = {
+ /* " * / : < > ? \ | */
+ 0x0022, 0x002A, 0x002F, 0x003A,
+ 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
+ 0
+};
+
+static int convert_ch_to_uni(struct nls_table *nls, u16 *uni, u8 *ch,
+ bool *lossy)
+{
+ int len;
+
+ *uni = 0x0;
+
+ if (ch[0] < 0x80) {
+ *uni = (u16)ch[0];
+ return 1;
+ }
+
+ len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ *uni = (u16)'_';
+ if (!strcmp(nls->charset, "utf8"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return len;
+}
+
+static int convert_uni_to_ch(struct nls_table *nls, u8 *ch, u16 uni,
+ bool *lossy)
+{
+ int len;
+
+ ch[0] = 0x0;
+
+ if (uni < 0x0080) {
+ ch[0] = (u8)uni;
+ return 1;
+ }
+
+ len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ ch[0] = '_';
+ return 1;
+ }
+
+ return len;
+}
+
+u16 nls_upper(struct super_block *sb, u16 a)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (EXFAT_SB(sb)->options.casesensitive)
+ return a;
+ if (p_fs->vol_utbl && p_fs->vol_utbl[get_col_index(a)])
+ return p_fs->vol_utbl[get_col_index(a)][get_row_index(a)];
+ else
+ return a;
+}
+
+static u16 *nls_wstrchr(u16 *str, u16 wchar)
+{
+ while (*str) {
+ if (*(str++) == wchar)
+ return str;
+ }
+
+ return NULL;
+}
+
+int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
+{
+ return strncmp(a, b, DOS_NAME_LENGTH);
+}
+
+int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
+{
+ int i;
+
+ for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) {
+ if (nls_upper(sb, *a) != nls_upper(sb, *b))
+ return 1;
+ if (*a == 0x0)
+ return 0;
+ }
+ return 0;
+}
+
+void nls_uniname_to_dosname(struct super_block *sb,
+ struct dos_name_t *p_dosname,
+ struct uni_name_t *p_uniname, bool *p_lossy)
+{
+ int i, j, len;
+ bool lossy = false;
+ u8 buf[MAX_CHARSET_SIZE];
+ u8 lower = 0, upper = 0;
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ u16 *p, *last_period;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ for (i = 0; i < DOS_NAME_LENGTH; i++)
+ *(dosname + i) = ' ';
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
+ *(dosname) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
+ *(dosname) = '.';
+ *(dosname + 1) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ /* search for the last embedded period */
+ last_period = NULL;
+ for (p = uniname; *p; p++) {
+ if (*p == (u16)'.')
+ last_period = p;
+ }
+
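+	/*
+	 * 'lower'/'upper' accumulate case flags for the 8.3 base name (0x08)
+	 * and extension (0x10); mixed upper and lower case cannot be stored
+	 * in the lcase byte and forces name_case to 0xFF.
+	 */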
+ i = 0;
+ while (i < DOS_NAME_LENGTH) {
+ if (i == 8) {
+ if (!last_period)
+ break;
+
+ if (uniname <= last_period) {
+ if (uniname < last_period)
+ lossy = true;
+ uniname = last_period + 1;
+ }
+ }
+
+ if (*uniname == (u16)'\0') {
+ break;
+ } else if (*uniname == (u16)' ') {
+ lossy = true;
+ } else if (*uniname == (u16)'.') {
+ if (uniname < last_period)
+ lossy = true;
+ else
+ i = 8;
+ } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
+ lossy = true;
+ *(dosname + i) = '_';
+ i++;
+ } else {
+ len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
+
+ if (len > 1) {
+ if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
+ break;
+
+ if ((i < 8) && ((i + len) > 8)) {
+ i = 8;
+ continue;
+ }
+
+ lower = 0xFF;
+
+ for (j = 0; j < len; j++, i++)
+ *(dosname + i) = *(buf + j);
+ } else { /* len == 1 */
+ if ((*buf >= 'a') && (*buf <= 'z')) {
+ *(dosname + i) = *buf - ('a' - 'A');
+
+ if (i < 8)
+ lower |= 0x08;
+ else
+ lower |= 0x10;
+ } else if ((*buf >= 'A') && (*buf <= 'Z')) {
+ *(dosname + i) = *buf;
+
+ if (i < 8)
+ upper |= 0x08;
+ else
+ upper |= 0x10;
+ } else {
+ *(dosname + i) = *buf;
+ }
+ i++;
+ }
+ }
+
+ uniname++;
+ }
+
+ if (*dosname == 0xE5)
+ *dosname = 0x05;
+
+ if (*uniname != 0x0)
+ lossy = true;
+
+ if (upper & lower)
+ p_dosname->name_case = 0xFF;
+ else
+ p_dosname->name_case = lower;
+
+ if (p_lossy)
+ *p_lossy = lossy;
+}
+
+void nls_dosname_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i = 0, j, n = 0;
+ u8 buf[DOS_NAME_LENGTH + 2];
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ if (*dosname == 0x05) {
+ *buf = 0xE5;
+ i++;
+ n++;
+ }
+
+ for (; i < 8; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x08))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ if (*(dosname + 8) != ' ') {
+ *(buf + n) = '.';
+ n++;
+ }
+
+ for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x10))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ *(buf + n) = '\0';
+
+ i = 0;
+ j = 0;
+ while (j < (MAX_NAME_LENGTH - 1)) {
+ if (*(buf + i) == '\0')
+ break;
+
+ i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
+
+ uniname++;
+ j++;
+ }
+
+ *uniname = (u16)'\0';
+}
+
+void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
+ struct uni_name_t *p_uniname)
+{
+ int i, j, len;
+ u8 buf[MAX_CHARSET_SIZE];
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ if (!nls) {
+ len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH,
+ UTF16_HOST_ENDIAN, p_cstring,
+ MAX_NAME_LENGTH);
+ p_cstring[len] = 0;
+ return;
+ }
+
+ i = 0;
+ while (i < (MAX_NAME_LENGTH - 1)) {
+ if (*uniname == (u16)'\0')
+ break;
+
+ len = convert_uni_to_ch(nls, buf, *uniname, NULL);
+
+ if (len > 1) {
+ for (j = 0; j < len; j++)
+ *p_cstring++ = (char)*(buf + j);
+ } else { /* len == 1 */
+ *p_cstring++ = (char)*buf;
+ }
+
+ uniname++;
+ i++;
+ }
+
+ *p_cstring = '\0';
+}
+
+void nls_cstring_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname, u8 *p_cstring,
+ bool *p_lossy)
+{
+ int i, j;
+ bool lossy = false;
+ u8 *end_of_name;
+ u8 upname[MAX_NAME_LENGTH * 2];
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ /* strip all trailing spaces */
+ end_of_name = p_cstring + strlen(p_cstring);
+
+ while (*(--end_of_name) == ' ') {
+ if (end_of_name < p_cstring)
+ break;
+ }
+ *(++end_of_name) = '\0';
+
+ if (strcmp(p_cstring, ".") && strcmp(p_cstring, "..")) {
+ /* strip all trailing periods */
+ while (*(--end_of_name) == '.') {
+ if (end_of_name < p_cstring)
+ break;
+ }
+ *(++end_of_name) = '\0';
+ }
+
+ if (*p_cstring == '\0')
+ lossy = true;
+
+ if (!nls) {
+ i = utf8s_to_utf16s(p_cstring, MAX_NAME_LENGTH,
+ UTF16_HOST_ENDIAN, uniname,
+ MAX_NAME_LENGTH);
+ for (j = 0; j < i; j++)
+ SET16_A(upname + j * 2, nls_upper(sb, uniname[j]));
+ uniname[i] = '\0';
+ } else {
+ i = 0;
+ j = 0;
+ while (j < (MAX_NAME_LENGTH - 1)) {
+ if (*(p_cstring + i) == '\0')
+ break;
+
+ i += convert_ch_to_uni(nls, uniname,
+ (u8 *)(p_cstring + i), &lossy);
+
+ if ((*uniname < 0x0020) ||
+ nls_wstrchr(bad_uni_chars, *uniname))
+ lossy = true;
+
+ SET16_A(upname + j * 2, nls_upper(sb, *uniname));
+
+ uniname++;
+ j++;
+ }
+
+ if (*(p_cstring + i) != '\0')
+ lossy = true;
+ *uniname = (u16)'\0';
+ }
+
+ p_uniname->name_len = j;
+ p_uniname->name_hash = calc_checksum_2byte(upname, j << 1, 0,
+ CS_DEFAULT);
+
+ if (p_lossy)
+ *p_lossy = lossy;
+}
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
new file mode 100644
index 000000000000..280bf0d1cf0b
--- /dev/null
+++ b/drivers/staging/exfat/exfat_super.c
@@ -0,0 +1,4146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <linux/aio.h>
+#include <linux/iversion.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/writeback.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include <linux/backing-dev.h>
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/time.h>
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/mutex.h>
+#include <linux/swap.h>
+
+#define EXFAT_VERSION "1.3.0"
+
+#include "exfat.h"
+
+static struct kmem_cache *exfat_inode_cachep;
+
+// FIXME use commented lines
+// static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE;
+// static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+static int exfat_default_codepage = CONFIG_FAT_DEFAULT_CODEPAGE;
+static char exfat_default_iocharset[] = CONFIG_FAT_DEFAULT_IOCHARSET;
+
+#define INC_IVERSION(x) (inode_inc_iversion(x))
+#define GET_IVERSION(x) (inode_peek_iversion_raw(x))
+#define SET_IVERSION(x, y) (inode_set_iversion(x, y))
+
+static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
+static int exfat_sync_inode(struct inode *inode);
+static struct inode *exfat_build_inode(struct super_block *sb,
+ struct file_id_t *fid, loff_t i_pos);
+static int exfat_write_inode(struct inode *inode,
+ struct writeback_control *wbc);
+static void exfat_write_super(struct super_block *sb);
+
+#define UNIX_SECS_1980 315532800L
+
+#if BITS_PER_LONG == 64
+#define UNIX_SECS_2108 4354819200L
+#endif
+
+/* days between 1.1.70 and 1.1.80 (2 leap days) */
+#define DAYS_DELTA_DECADE (365 * 10 + 2)
+/* 120 (2100 - 1980) isn't leap year */
+#define NO_LEAP_YEAR_2100 (120)
+#define IS_LEAP_YEAR(y) (!((y) & 0x3) && (y) != NO_LEAP_YEAR_2100)
+
+#define SECS_PER_MIN (60)
+#define SECS_PER_HOUR (60 * SECS_PER_MIN)
+#define SECS_PER_DAY (24 * SECS_PER_HOUR)
+
+#define MAKE_LEAP_YEAR(leap_year, year) \
+ do { \
+ if (unlikely(year > NO_LEAP_YEAR_2100)) \
+ leap_year = ((year + 3) / 4) - 1; \
+ else \
+ leap_year = ((year + 3) / 4); \
+ } while (0)
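+
+/*
+ * Worked example for the leap-day count above (years are counted from
+ * 1980, as in the on-disk FAT/exFAT date format): year 21 (2001) gives
+ * (21 + 3) / 4 = 6, matching the six leap years 1980, 1984, 1988, 1992,
+ * 1996 and 2000 that precede it.  2100 is not a leap year, so one leap
+ * day is subtracted for years beyond NO_LEAP_YEAR_2100.
+ */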
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+static time_t accum_days_in_year[] = {
+ /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
+ 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
+};
+
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+static void exfat_time_fat2unix(struct exfat_sb_info *sbi,
+ struct timespec64 *ts, struct date_time_t *tp)
+{
+ time_t year = tp->Year;
+ time_t ld;
+
+ MAKE_LEAP_YEAR(ld, year);
+
+ if (IS_LEAP_YEAR(year) && (tp->Month) > 2)
+ ld++;
+
+ ts->tv_sec = tp->Second +
+ tp->Minute * SECS_PER_MIN +
+ tp->Hour * SECS_PER_HOUR +
+ (ld + accum_days_in_year[(tp->Month)] +
+ (tp->Day - 1)) * SECS_PER_DAY +
+ (year * 365 + DAYS_DELTA_DECADE) * SECS_PER_DAY +
+ sys_tz.tz_minuteswest * SECS_PER_MIN;
+
+ ts->tv_nsec = 0;
+}
+
+/* Convert linear UNIX date to a FAT time/date pair. */
+static void exfat_time_unix2fat(struct exfat_sb_info *sbi,
+ struct timespec64 *ts, struct date_time_t *tp)
+{
+ time_t second = ts->tv_sec;
+ time_t day, month, year;
+ time_t ld;
+
+ second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
+
+ /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
+ if (second < UNIX_SECS_1980) {
+ tp->Second = 0;
+ tp->Minute = 0;
+ tp->Hour = 0;
+ tp->Day = 1;
+ tp->Month = 1;
+ tp->Year = 0;
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ if (second >= UNIX_SECS_2108) {
+ tp->Second = 59;
+ tp->Minute = 59;
+ tp->Hour = 23;
+ tp->Day = 31;
+ tp->Month = 12;
+ tp->Year = 127;
+ return;
+ }
+#endif
+ day = second / SECS_PER_DAY - DAYS_DELTA_DECADE;
+ year = day / 365;
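+ /*
+ * day / 365 can overshoot the real year once enough leap days have
+ * accumulated; recompute the leap-day count and step the year back
+ * if the estimate lands past the requested day.
+ */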
+ MAKE_LEAP_YEAR(ld, year);
+ if (year * 365 + ld > day)
+ year--;
+
+ MAKE_LEAP_YEAR(ld, year);
+ day -= year * 365 + ld;
+
+ if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) {
+ month = 2;
+ } else {
+ if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3])
+ day--;
+ for (month = 1; month < 12; month++) {
+ if (accum_days_in_year[month + 1] > day)
+ break;
+ }
+ }
+ day -= accum_days_in_year[month];
+
+ tp->Second = second % SECS_PER_MIN;
+ tp->Minute = (second / SECS_PER_MIN) % 60;
+ tp->Hour = (second / SECS_PER_HOUR) % 24;
+ tp->Day = day + 1;
+ tp->Month = month;
+ tp->Year = year;
+}
+
+struct timestamp_t *tm_current(struct timestamp_t *tp)
+{
+ struct timespec64 ts;
+ time_t second, day, leap_day, month, year;
+
+ ktime_get_real_ts64(&ts);
+
+ second = ts.tv_sec;
+ second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
+
+ /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
+ if (second < UNIX_SECS_1980) {
+ tp->sec = 0;
+ tp->min = 0;
+ tp->hour = 0;
+ tp->day = 1;
+ tp->mon = 1;
+ tp->year = 0;
+ return tp;
+ }
+#if BITS_PER_LONG == 64
+ if (second >= UNIX_SECS_2108) {
+ tp->sec = 59;
+ tp->min = 59;
+ tp->hour = 23;
+ tp->day = 31;
+ tp->mon = 12;
+ tp->year = 127;
+ return tp;
+ }
+#endif
+
+ day = second / SECS_PER_DAY - DAYS_DELTA_DECADE;
+ year = day / 365;
+
+ MAKE_LEAP_YEAR(leap_day, year);
+ if (year * 365 + leap_day > day)
+ year--;
+
+ MAKE_LEAP_YEAR(leap_day, year);
+
+ day -= year * 365 + leap_day;
+
+ if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) {
+ month = 2;
+ } else {
+ if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3])
+ day--;
+ for (month = 1; month < 12; month++) {
+ if (accum_days_in_year[month + 1] > day)
+ break;
+ }
+ }
+ day -= accum_days_in_year[month];
+
+ tp->sec = second % SECS_PER_MIN;
+ tp->min = (second / SECS_PER_MIN) % 60;
+ tp->hour = (second / SECS_PER_HOUR) % 24;
+ tp->day = day + 1;
+ tp->mon = month;
+ tp->year = year;
+
+ return tp;
+}
+
+static void __lock_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ mutex_lock(&sbi->s_lock);
+}
+
+static void __unlock_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ mutex_unlock(&sbi->s_lock);
+}
+
+static int __is_sb_dirty(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ return sbi->s_dirt;
+}
+
+static void __set_sb_clean(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ sbi->s_dirt = 0;
+}
+
+static int __exfat_revalidate(struct dentry *dentry)
+{
+ return 0;
+}
+
+static int exfat_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (dentry->d_inode)
+ return 1;
+ return __exfat_revalidate(dentry);
+}
+
+static int exfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (dentry->d_inode)
+ return 1;
+
+ if (!flags)
+ return 0;
+
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ return 0;
+
+ return __exfat_revalidate(dentry);
+}
+
+static unsigned int __exfat_striptail_len(unsigned int len, const char *name)
+{
+ while (len && name[len - 1] == '.')
+ len--;
+ return len;
+}
+
+static unsigned int exfat_striptail_len(const struct qstr *qstr)
+{
+ return __exfat_striptail_len(qstr->len, qstr->name);
+}
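+
+/*
+ * Both the hash and compare callbacks below work on names with trailing
+ * periods stripped, mirroring nls_cstring_to_uniname(), so "foo" and
+ * "foo." resolve to the same directory entry.
+ */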
+
+static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+ qstr->hash = full_name_hash(dentry, qstr->name,
+ exfat_striptail_len(qstr));
+ return 0;
+}
+
+static int exfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
+{
+ struct super_block *sb = dentry->d_sb;
+ const unsigned char *name;
+ unsigned int len;
+ unsigned long hash;
+
+ name = qstr->name;
+ len = exfat_striptail_len(qstr);
+
+ hash = init_name_hash(dentry);
+ while (len--)
+ hash = partial_name_hash(nls_upper(sb, *name++), hash);
+ qstr->hash = end_name_hash(hash);
+
+ return 0;
+}
+
+static int exfat_cmpi(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io;
+ unsigned int alen, blen;
+
+ alen = exfat_striptail_len(name);
+ blen = __exfat_striptail_len(len, str);
+ if (alen == blen) {
+ if (t == NULL) {
+ if (strncasecmp(name->name, str, alen) == 0)
+ return 0;
+ } else {
+ if (nls_strnicmp(t, name->name, str, alen) == 0)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int exfat_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ unsigned int alen, blen;
+
+ alen = exfat_striptail_len(name);
+ blen = __exfat_striptail_len(len, str);
+ if (alen == blen) {
+ if (strncmp(name->name, str, alen) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+static const struct dentry_operations exfat_ci_dentry_ops = {
+ .d_revalidate = exfat_revalidate_ci,
+ .d_hash = exfat_d_hashi,
+ .d_compare = exfat_cmpi,
+};
+
+static const struct dentry_operations exfat_dentry_ops = {
+ .d_revalidate = exfat_revalidate,
+ .d_hash = exfat_d_hash,
+ .d_compare = exfat_cmp,
+};
+
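+/*
+ * z_sem is global rather than per-superblock: it serializes ffsMountVol()
+ * and ffsUmountVol() across all exfat instances.
+ */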
+static DEFINE_SEMAPHORE(z_sem);
+
+static inline void fs_sync(struct super_block *sb, bool do_sync)
+{
+ if (do_sync)
+ bdev_sync(sb);
+}
+
+/*
+ * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * If it's a directory and !sbi->options.rodir, ATTR_RO isn't a
+ * read-only bit; it's just used as a flag for applications.
+ */
+static inline int exfat_mode_can_hold_ro(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+
+ if ((~sbi->options.fs_fmask) & 0222)
+ return 1;
+ return 0;
+}
+
+/* Convert attribute bits and a mask to the UNIX mode. */
+static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, u32 attr,
+ mode_t mode)
+{
+ if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+ mode &= ~0222;
+
+ if (attr & ATTR_SUBDIR)
+ return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
+ else if (attr & ATTR_SYMLINK)
+ return (mode & ~sbi->options.fs_dmask) | S_IFLNK;
+ else
+ return (mode & ~sbi->options.fs_fmask) | S_IFREG;
+}
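+
+/*
+ * Example of the mapping above: a read-only regular file (ATTR_READONLY
+ * set, ATTR_SUBDIR clear) with a base mode of 0777 and fs_fmask of 0022
+ * yields S_IFREG | 0555: the write bits are dropped first, then the
+ * mount-time fmask is applied.
+ */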
+
+/* Return the FAT attribute byte for this inode */
+static inline u32 exfat_make_attr(struct inode *inode)
+{
+ if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
+ return (EXFAT_I(inode)->fid.attr) | ATTR_READONLY;
+ else
+ return EXFAT_I(inode)->fid.attr;
+}
+
+static inline void exfat_save_attr(struct inode *inode, u32 attr)
+{
+ if (exfat_mode_can_hold_ro(inode))
+ EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK;
+ else
+ EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY);
+}
+
+static int ffsMountVol(struct super_block *sb)
+{
+ int i, ret;
+ struct pbr_sector_t *p_pbr;
+ struct buffer_head *tmp_bh = NULL;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ pr_info("[EXFAT] trying to mount...\n");
+
+ down(&z_sem);
+
+ buf_init(sb);
+
+ sema_init(&p_fs->v_sem, 1);
+ p_fs->dev_ejected = 0;
+
+ /* open the block device */
+ bdev_open(sb);
+
+ if (p_bd->sector_size < sb->s_blocksize) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ if (p_bd->sector_size > sb->s_blocksize)
+ sb_set_blocksize(sb, p_bd->sector_size);
+
+ /* read Sector 0 */
+ if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ p_fs->PBR_sector = 0;
+
+ p_pbr = (struct pbr_sector_t *) tmp_bh->b_data;
+
+ /* check the validity of PBR */
+ if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
+ brelse(tmp_bh);
+ bdev_close(sb);
+ ret = FFS_FORMATERR;
+ goto out;
+ }
+
+ /* fill fs_struct */
+ for (i = 0; i < 53; i++)
+ if (p_pbr->bpb[i])
+ break;
+
+ if (i < 53) {
+#ifdef CONFIG_EXFAT_DONT_MOUNT_VFAT
+ ret = -EINVAL;
+ printk(KERN_INFO "EXFAT: Attempted to mount VFAT filesystem\n");
+ goto out;
+#else
+ if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
+ ret = fat16_mount(sb, p_pbr);
+ else
+ ret = fat32_mount(sb, p_pbr);
+#endif
+ } else {
+ ret = exfat_mount(sb, p_pbr);
+ }
+
+ brelse(tmp_bh);
+
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+ ret = load_upcase_table(sb);
+ if (ret) {
+ free_alloc_bitmap(sb);
+ bdev_close(sb);
+ goto out;
+ }
+ }
+
+ if (p_fs->dev_ejected) {
+ if (p_fs->vol_type == EXFAT) {
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ }
+ bdev_close(sb);
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ pr_info("[EXFAT] mounted successfully\n");
+
+out:
+ up(&z_sem);
+
+ return ret;
+}
+
+static int ffsUmountVol(struct super_block *sb)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int err = FFS_SUCCESS;
+
+ pr_info("[EXFAT] trying to unmount...\n");
+
+ down(&z_sem);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ if (p_fs->vol_type == EXFAT) {
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ }
+
+ FAT_release_all(sb);
+ buf_release_all(sb);
+
+ /* close the block device */
+ bdev_close(sb);
+
+ if (p_fs->dev_ejected) {
+ pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
+ err = FFS_MEDIAERR;
+ }
+
+ buf_shutdown(sb);
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+ up(&z_sem);
+
+ pr_info("[EXFAT] unmounted successfully\n");
+
+ return err;
+}
+
+static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
+{
+ int err = FFS_SUCCESS;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of pointer parameters */
+ if (info == NULL)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (p_fs->used_clusters == (u32) ~0)
+ p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
+
+ info->FatType = p_fs->vol_type;
+ info->ClusterSize = p_fs->cluster_size;
+ info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */
+ info->UsedClusters = p_fs->used_clusters;
+ info->FreeClusters = info->NumClusters - info->UsedClusters;
+
+ if (p_fs->dev_ejected)
+ err = FFS_MEDIAERR;
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return err;
+}
+
+static int ffsSyncVol(struct super_block *sb, bool do_sync)
+{
+ int err = FFS_SUCCESS;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* synchronize the file system */
+ fs_sync(sb, do_sync);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ if (p_fs->dev_ejected)
+ err = FFS_MEDIAERR;
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return err;
+}
+
+/*----------------------------------------------------------------------*/
+/* File Operation Functions */
+/*----------------------------------------------------------------------*/
+
+static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
+{
+ int ret, dentry, num_entries;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct dos_name_t dos_name;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ pr_debug("%s entered\n", __func__);
+
+ /* check the validity of pointer parameters */
+ if ((fid == NULL) || (path == NULL) || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries,
+ &dos_name);
+ if (ret)
+ goto out;
+
+ /* search the file name for directories */
+ dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
+ &dos_name, TYPE_ALL);
+ if (dentry < -1) {
+ ret = FFS_NOTFOUND;
+ goto out;
+ }
+
+ fid->dir.dir = dir.dir;
+ fid->dir.size = dir.size;
+ fid->dir.flags = dir.flags;
+ fid->entry = dentry;
+
+ if (dentry == -1) {
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = 0x01;
+ fid->size = 0;
+ fid->start_clu = p_fs->root_dir;
+ } else {
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &dir, dentry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep+1;
+ } else {
+ ep = get_entry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ fid->type = p_fs->fs_func->get_entry_type(ep);
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+ fid->attr = p_fs->fs_func->get_entry_attr(ep);
+
+ fid->size = p_fs->fs_func->get_entry_size(ep2);
+ if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUSTER_32(~0);
+ } else {
+ fid->flags = p_fs->fs_func->get_entry_flag(ep2);
+ fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+ }
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
+ struct file_id_t *fid)
+{
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int ret;
+
+ /* check the validity of pointer parameters */
+ if ((fid == NULL) || (path == NULL) || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* create a new file */
+ ret = create_file(inode, &dir, &uni_name, mode, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
+ u64 count, u64 *rcount)
+{
+ s32 offset, sec_offset, clu_offset;
+ u32 clu;
+ int ret = 0;
+ sector_t LogSector;
+ u64 oneblkread, read_bytes;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ /* check the validity of the given file id */
+ if (fid == NULL)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if (buffer == NULL)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count > (fid->size - fid->rwoffset))
+ count = fid->size - fid->rwoffset;
+
+ if (count == 0) {
+ if (rcount != NULL)
+ *rcount = 0;
+ ret = FFS_EOF;
+ goto out;
+ }
+
+ read_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ clu = fid->start_clu;
+
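+ /*
+ * fid->flags == 0x03 marks a contiguous cluster run, so the target
+ * cluster is computed arithmetically; otherwise the chain is walked
+ * through the FAT, starting from the cached hint
+ * (hint_last_off/hint_last_clu) when the requested offset lies at or
+ * beyond it, so sequential reads need not restart from start_clu.
+ */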
+ if (fid->flags == 0x03) {
+ clu += clu_offset;
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu = fid->hint_last_clu;
+ }
+
+ while (clu_offset > 0) {
+ /* clu = FAT_read(sb, clu); */
+ if (FAT_read(sb, clu, &clu) == -1)
+ return FFS_MEDIAERR;
+
+ clu_offset--;
+ }
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >>
+ p_fs->cluster_size_bits);
+ fid->hint_last_clu = clu;
+
+ /* byte offset in cluster */
+ offset = (s32)(fid->rwoffset & (p_fs->cluster_size-1));
+
+ /* sector offset in cluster */
+ sec_offset = offset >> p_bd->sector_size_bits;
+
+ /* byte offset in sector */
+ offset &= p_bd->sector_size_mask;
+
+ LogSector = START_SECTOR(clu) + sec_offset;
+
+ oneblkread = (u64)(p_bd->sector_size - offset);
+ if (oneblkread > count)
+ oneblkread = count;
+
+ if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)buffer + read_bytes,
+ (char *)tmp_bh->b_data, (s32)oneblkread);
+ } else {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)buffer + read_bytes,
+ (char *)tmp_bh->b_data + offset,
+ (s32)oneblkread);
+ }
+ count -= oneblkread;
+ read_bytes += oneblkread;
+ fid->rwoffset += oneblkread;
+ }
+ brelse(tmp_bh);
+
+/* How did this ever work and not leak a brelse()?? */
+err_out:
+ /* set the size of read bytes */
+ if (rcount != NULL)
+ *rcount = read_bytes;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
+ void *buffer, u64 count, u64 *wcount)
+{
+ bool modified = false;
+ s32 offset, sec_offset, clu_offset;
+ s32 num_clusters, num_alloc, num_alloced = (s32) ~0;
+ int ret = 0;
+ u32 clu, last_clu;
+ sector_t LogSector, sector = 0;
+ u64 oneblkwrite, write_bytes;
+ struct chain_t new_clu;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ /* check the validity of the given file id */
+ if (fid == NULL)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if (buffer == NULL)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count == 0) {
+ if (wcount != NULL)
+ *wcount = 0;
+ ret = FFS_SUCCESS;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (fid->size == 0)
+ num_clusters = 0;
+ else
+ num_clusters = (s32)((fid->size-1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ write_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ clu = last_clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ clu = CLUSTER_32(~0);
+ else
+ clu += clu_offset;
+ }
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu = fid->hint_last_clu;
+ }
+
+ while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+ last_clu = clu;
+ /* clu = FAT_read(sb, clu); */
+ if (FAT_read(sb, clu, &clu) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+
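+ /*
+ * Writing past the end of the allocated chain: allocate enough new
+ * clusters for the remaining byte count, hinting for them right after
+ * last_clu so a contiguous (0x03) run can stay contiguous, then link
+ * them onto the file in step (2) below.
+ */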
+ if (clu == CLUSTER_32(~0)) {
+ num_alloc = (s32)((count - 1) >>
+ p_fs->cluster_size_bits) + 1;
+ new_clu.dir = (last_clu == CLUSTER_32(~0)) ?
+ CLUSTER_32(~0) : last_clu+1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a chain of clusters */
+ num_alloced = p_fs->fs_func->alloc_cluster(sb,
+ num_alloc,
+ &new_clu);
+ if (num_alloced == 0)
+ break;
+ if (num_alloced < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ /* (2) append to the FAT chain */
+ if (last_clu == CLUSTER_32(~0)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ exfat_chain_cont_cluster(sb,
+ fid->start_clu,
+ num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01)
+ FAT_write(sb, last_clu, new_clu.dir);
+ }
+
+ num_clusters += num_alloced;
+ clu = new_clu.dir;
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >>
+ p_fs->cluster_size_bits);
+ fid->hint_last_clu = clu;
+
+ /* byte offset in cluster */
+ offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
+
+ /* sector offset in cluster */
+ sec_offset = offset >> p_bd->sector_size_bits;
+
+ /* byte offset in sector */
+ offset &= p_bd->sector_size_mask;
+
+ LogSector = START_SECTOR(clu) + sec_offset;
+
+ oneblkwrite = (u64)(p_bd->sector_size - offset);
+ if (oneblkwrite > count)
+ oneblkwrite = count;
+
+ if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)tmp_bh->b_data,
+ (char *)buffer + write_bytes, (s32)oneblkwrite);
+ if (sector_write(sb, LogSector, tmp_bh, 0) !=
+ FFS_SUCCESS) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ } else {
+ if ((offset > 0) ||
+ ((fid->rwoffset+oneblkwrite) < fid->size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ } else {
+ if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+ FFS_SUCCESS)
+ goto err_out;
+ }
+
+ memcpy((char *)tmp_bh->b_data + offset,
+ (char *)buffer + write_bytes, (s32)oneblkwrite);
+ if (sector_write(sb, LogSector, tmp_bh, 0) !=
+ FFS_SUCCESS) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ }
+
+ count -= oneblkwrite;
+ write_bytes += oneblkwrite;
+ fid->rwoffset += oneblkwrite;
+
+ fid->attr |= ATTR_ARCHIVE;
+
+ if (fid->size < fid->rwoffset) {
+ fid->size = fid->rwoffset;
+ modified = true;
+ }
+ }
+
+ brelse(tmp_bh);
+
+ /* (3) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (es == NULL)
+ goto err_out;
+ ep2 = ep+1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep)
+ goto err_out;
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+
+ if (modified) {
+ if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
+ p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+
+ if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
+ p_fs->fs_func->set_entry_size(ep2, fid->size);
+
+ if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
+ p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+err_out:
+ /* set the size of written bytes */
+ if (wcount != NULL)
+ *wcount = write_bytes;
+
+ if (num_alloced == 0)
+ ret = FFS_FULL;
+
+ else if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
+{
+ s32 num_clusters;
+ u32 last_clu = CLUSTER_32(0);
+ int ret = 0;
+ sector_t sector = 0;
+ struct chain_t clu;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct entry_set_cache_t *es = NULL;
+
+ pr_debug("%s entered (inode %p size %llu)\n", __func__, inode,
+ new_size);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+ if (fid->size != old_size) {
+ pr_err("[EXFAT] truncate : can't skip it because of size-mismatch(old:%lld->fid:%lld).\n",
+ old_size, fid->size);
+ }
+
+ if (old_size <= new_size) {
+ ret = FFS_SUCCESS;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ clu.dir = fid->start_clu;
+ clu.size = (s32)((old_size-1) >> p_fs->cluster_size_bits) + 1;
+ clu.flags = fid->flags;
+
+ if (new_size > 0) {
+ num_clusters = (s32)((new_size-1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ if (clu.flags == 0x03) {
+ clu.dir += num_clusters;
+ } else {
+ while (num_clusters > 0) {
+ last_clu = clu.dir;
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1)
+ return FFS_MEDIAERR;
+ num_clusters--;
+ }
+ }
+
+ clu.size -= num_clusters;
+ }
+
+ fid->size = new_size;
+ fid->attr |= ATTR_ARCHIVE;
+ if (new_size == 0) {
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUSTER_32(~0);
+ }
+
+ /* (1) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (es == NULL) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep+1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+ p_fs->fs_func->set_entry_size(ep2, new_size);
+ if (new_size == 0) {
+ p_fs->fs_func->set_entry_flag(ep2, 0x01);
+ p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+ }
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ /* (2) cut off from the FAT chain */
+ if (last_clu != CLUSTER_32(0)) {
+ if (fid->flags == 0x01)
+ FAT_write(sb, last_clu, CLUSTER_32(~0));
+ }
+
+ /* (3) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu, 0);
+
+ /* hint information */
+ fid->hint_last_off = -1;
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ pr_debug("%s exited (%d)\n", __func__, ret);
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
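+/*
+ * The parent-directory chain cached in a fid can go stale if the parent
+ * itself was moved or resized; resync it from the parent inode's fid
+ * before using it for a rename or move.
+ */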
+static void update_parent_info(struct file_id_t *fid,
+ struct inode *parent_inode)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info);
+ struct file_id_t *parent_fid = &(EXFAT_I(parent_inode)->fid);
+
+ if (unlikely((parent_fid->flags != fid->dir.flags) ||
+ (parent_fid->size !=
+ (fid->dir.size << p_fs->cluster_size_bits)) ||
+ (parent_fid->start_clu != fid->dir.dir))) {
+ fid->dir.dir = parent_fid->start_clu;
+ fid->dir.flags = parent_fid->flags;
+ fid->dir.size = ((parent_fid->size + (p_fs->cluster_size-1))
+ >> p_fs->cluster_size_bits);
+ }
+}
+
+static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
+ struct inode *new_parent_inode, struct dentry *new_dentry)
+{
+ s32 ret;
+ s32 dentry;
+ struct chain_t olddir, newdir;
+ struct chain_t *p_dir = NULL;
+ struct uni_name_t uni_name;
+ struct dentry_t *ep;
+ struct super_block *sb = old_parent_inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ u8 *new_path = (u8 *) new_dentry->d_name.name;
+ struct inode *new_inode = new_dentry->d_inode;
+ int num_entries;
+ struct file_id_t *new_fid = NULL;
+ s32 new_entry = 0;
+
+ /* check the validity of the given file id */
+ if (fid == NULL)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if ((new_path == NULL) || (*new_path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ update_parent_info(fid, old_parent_inode);
+
+ olddir.dir = fid->dir.dir;
+ olddir.size = fid->dir.size;
+ olddir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ /* check if the old file is "." or ".." */
+ if (p_fs->vol_type != EXFAT) {
+ if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
+ ret = FFS_PERMISSIONERR;
+ goto out2;
+ }
+ }
+
+ ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out2;
+ }
+
+ if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ ret = FFS_PERMISSIONERR;
+ goto out2;
+ }
+
+ /* check whether new dir is existing directory and empty */
+ if (new_inode) {
+ u32 entry_type;
+
+ ret = FFS_MEDIAERR;
+ new_fid = &EXFAT_I(new_inode)->fid;
+
+ update_parent_info(new_fid, new_parent_inode);
+
+ p_dir = &(new_fid->dir);
+ new_entry = new_fid->entry;
+ ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep)
+ goto out;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if (entry_type == TYPE_DIR) {
+ struct chain_t new_clu;
+
+ new_clu.dir = new_fid->start_clu;
+ new_clu.size = (s32)((new_fid->size - 1) >>
+ p_fs->cluster_size_bits) + 1;
+ new_clu.flags = new_fid->flags;
+
+ if (!is_dir_empty(sb, &new_clu)) {
+ ret = FFS_FILEEXIST;
+ goto out;
+ }
+ }
+ }
+
+ /* check the validity of directory name in the given new pathname */
+ ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name);
+ if (ret)
+ goto out2;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (olddir.dir == newdir.dir)
+ ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name,
+ fid);
+ else
+ ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
+ &uni_name, fid);
+
+ if ((ret == FFS_SUCCESS) && new_inode) {
+ /* delete entries of new_dir */
+ ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep)
+ goto out;
+
+ num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir,
+ new_entry, ep);
+ if (num_entries < 0)
+ goto out;
+ p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
+ num_entries+1);
+ }
+out:
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out2:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
+{
+ s32 dentry;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir, clu_to_free;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of the given file id */
+ if (fid == NULL)
+ return FFS_INVALIDFID;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ dir.dir = fid->dir.dir;
+ dir.size = fid->dir.size;
+ dir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ ep = get_entry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* (1) update the directory entry */
+ remove_file(inode, &dir, dentry);
+
+ clu_to_free.dir = fid->start_clu;
+ clu_to_free.size = (s32)((fid->size-1) >> p_fs->cluster_size_bits) + 1;
+ clu_to_free.flags = fid->flags;
+
+ /* (2) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu_to_free, 0);
+
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+#if 0
+/* Not currently wired up */
+static int ffsSetAttr(struct inode *inode, u32 attr)
+{
+ u32 type;
+ int ret = FFS_SUCCESS;
+ sector_t sector = 0;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+ struct entry_set_cache_t *es = NULL;
+
+ if (fid->attr == attr) {
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+ return FFS_SUCCESS;
+ }
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+ return FFS_SUCCESS;
+ }
+ }
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* get the directory entry of given file */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (es == NULL) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
+ ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ else
+ ret = FFS_ERROR;
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* set the file attribute */
+ fid->attr = attr;
+ p_fs->fs_func->set_entry_attr(ep, attr);
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+#endif
+
+static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
+{
+ sector_t sector = 0;
+ s32 count;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct entry_set_cache_t *es = NULL;
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+ pr_debug("%s entered\n", __func__);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+ info->Attr = ATTR_SUBDIR;
+ memset((char *)&info->CreateTimestamp, 0,
+ sizeof(struct date_time_t));
+ memset((char *)&info->ModifyTimestamp, 0,
+ sizeof(struct date_time_t));
+ memset((char *)&info->AccessTimestamp, 0,
+ sizeof(struct date_time_t));
+ strcpy(info->ShortName, ".");
+ strcpy(info->Name, ".");
+
+ dir.dir = p_fs->root_dir;
+ dir.flags = 0x01;
+
+ if (p_fs->root_dir == CLUSTER_32(0)) {
+ /* FAT16 root_dir */
+ info->Size = p_fs->dentries_in_root <<
+ DENTRY_SIZE_BITS;
+ } else {
+ info->Size = count_num_clusters(sb, &dir) <<
+ p_fs->cluster_size_bits;
+ }
+
+ count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+ if (count < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ info->NumSubdirs = count;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ /* get the directory entry of given file or directory */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_2_ENTRIES, &ep);
+ if (es == NULL) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep+1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ buf_lock(sb, sector);
+ }
+
+ /* set FILE_INFO structure using the acquired struct dentry_t */
+ info->Attr = p_fs->fs_func->get_entry_attr(ep);
+
+ p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ info->CreateTimestamp.Year = tm.year;
+ info->CreateTimestamp.Month = tm.mon;
+ info->CreateTimestamp.Day = tm.day;
+ info->CreateTimestamp.Hour = tm.hour;
+ info->CreateTimestamp.Minute = tm.min;
+ info->CreateTimestamp.Second = tm.sec;
+ info->CreateTimestamp.MilliSecond = 0;
+
+ p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ info->ModifyTimestamp.Year = tm.year;
+ info->ModifyTimestamp.Month = tm.mon;
+ info->ModifyTimestamp.Day = tm.day;
+ info->ModifyTimestamp.Hour = tm.hour;
+ info->ModifyTimestamp.Minute = tm.min;
+ info->ModifyTimestamp.Second = tm.sec;
+ info->ModifyTimestamp.MilliSecond = 0;
+
+ memset((char *) &info->AccessTimestamp, 0, sizeof(struct date_time_t));
+
+ *(uni_name.name) = 0x0;
+ /* XXX this is very bad for exfat because the name is already included
+ * in es. The API should be revised.
+ */
+ p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+ uni_name.name);
+ if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
+ get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+ &uni_name, 0x1);
+ nls_uniname_to_cstring(sb, info->Name, &uni_name);
+
+ if (p_fs->vol_type == EXFAT) {
+ info->NumSubdirs = 2;
+ } else {
+ buf_unlock(sb, sector);
+ get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+ &uni_name, 0x0);
+ nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
+ info->NumSubdirs = 0;
+ }
+
+ info->Size = p_fs->fs_func->get_entry_size(ep2);
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+
+ if (is_dir) {
+ dir.dir = fid->start_clu;
+ dir.flags = 0x01;
+
+ if (info->Size == 0)
+ info->Size = (u64)count_num_clusters(sb, &dir) <<
+ p_fs->cluster_size_bits;
+
+ count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+ if (count < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ info->NumSubdirs += count;
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ pr_debug("%s exited successfully\n", __func__);
+ return ret;
+}
+
+static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
+{
+ sector_t sector = 0;
+ int ret = FFS_SUCCESS;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+ pr_debug("%s entered (inode %p info %p\n", __func__, inode, info);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ ret = FFS_SUCCESS;
+ goto out;
+ }
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* get the directory entry of given file or directory */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (es == NULL) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep+1;
+ } else {
+ /* for other than exfat */
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_attr(ep, info->Attr);
+
+ /* set FILE_INFO structure using the acquired struct dentry_t */
+ tm.sec = info->CreateTimestamp.Second;
+ tm.min = info->CreateTimestamp.Minute;
+ tm.hour = info->CreateTimestamp.Hour;
+ tm.day = info->CreateTimestamp.Day;
+ tm.mon = info->CreateTimestamp.Month;
+ tm.year = info->CreateTimestamp.Year;
+ p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+
+ tm.sec = info->ModifyTimestamp.Second;
+ tm.min = info->ModifyTimestamp.Minute;
+ tm.hour = info->ModifyTimestamp.Hour;
+ tm.day = info->ModifyTimestamp.Day;
+ tm.mon = info->ModifyTimestamp.Month;
+ tm.year = info->ModifyTimestamp.Year;
+ p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+
+ p_fs->fs_func->set_entry_size(ep2, info->Size);
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ pr_debug("%s exited (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
+{
+ s32 num_clusters, num_alloced;
+ bool modified = false;
+ u32 last_clu;
+ int ret = FFS_SUCCESS;
+ sector_t sector = 0;
+ struct chain_t new_clu;
+ struct dentry_t *ep;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ /* check the validity of pointer parameters */
+ if (clu == NULL)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
+
+ if (EXFAT_I(inode)->mmu_private == 0)
+ num_clusters = 0;
+ else
+ num_clusters = (s32)((EXFAT_I(inode)->mmu_private - 1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ *clu = last_clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ *clu = CLUSTER_32(~0);
+ else
+ *clu += clu_offset;
+ }
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ *clu = fid->hint_last_clu;
+ }
+
+ while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+ last_clu = *clu;
+ if (FAT_read(sb, *clu, clu) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+
+ if (*clu == CLUSTER_32(~0)) {
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ new_clu.dir = (last_clu == CLUSTER_32(~0)) ? CLUSTER_32(~0) :
+ last_clu + 1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a cluster */
+ num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+ if (num_alloced < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ } else if (num_alloced == 0) {
+ ret = FFS_FULL;
+ goto out;
+ }
+
+ /* (2) append to the FAT chain */
+ if (last_clu == CLUSTER_32(~0)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ exfat_chain_cont_cluster(sb, fid->start_clu,
+ num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01)
+ FAT_write(sb, last_clu, new_clu.dir);
+ }
+
+ num_clusters += num_alloced;
+ *clu = new_clu.dir;
+
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (es == NULL) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ /* get stream entry */
+ ep++;
+ }
+
+ /* (3) update directory entry */
+ if (modified) {
+ if (p_fs->vol_type != EXFAT) {
+ ep = get_entry_in_dir(sb, &(fid->dir),
+ fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
+ p_fs->fs_func->set_entry_flag(ep, fid->flags);
+
+ if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
+ p_fs->fs_func->set_entry_clu0(ep,
+ fid->start_clu);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ /* add number of new blocks to inode */
+ inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ fid->hint_last_clu = *clu;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------*/
+/* Directory Operation Functions */
+/*----------------------------------------------------------------------*/
+
+static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
+{
+ int ret = FFS_SUCCESS;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ pr_debug("%s entered\n", __func__);
+
+ /* check the validity of pointer parameters */
+ if ((fid == NULL) || (path == NULL) || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given old pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ ret = create_dir(inode, &dir, &uni_name, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
+{
+ int i, dentry, clu_offset;
+ int ret = FFS_SUCCESS;
+ s32 dentries_per_clu, dentries_per_clu_bits = 0;
+ u32 type;
+ sector_t sector;
+ struct chain_t dir, clu;
+ struct uni_name_t uni_name;
+ struct timestamp_t tm;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ /* check the validity of pointer parameters */
+ if (dir_entry == NULL)
+ return FFS_ERROR;
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_DIR)
+ return FFS_PERMISSIONERR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (fid->entry == -1) {
+ dir.dir = p_fs->root_dir;
+ dir.flags = 0x01;
+ } else {
+ dir.dir = fid->start_clu;
+ dir.size = (s32)(fid->size >> p_fs->cluster_size_bits);
+ dir.flags = fid->flags;
+ }
+
+ dentry = (s32)fid->rwoffset;
+
+ if (dir.dir == CLUSTER_32(0)) {
+ /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+
+ if (dentry == dentries_per_clu) {
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ clu.dir = dir.dir;
+ clu.size = dir.size;
+ clu.flags = dir.flags;
+ }
+ } else {
+ dentries_per_clu = p_fs->dentries_per_clu;
+ dentries_per_clu_bits = ilog2(dentries_per_clu);
+
+ clu_offset = dentry >> dentries_per_clu_bits;
+ clu.dir = dir.dir;
+ clu.size = dir.size;
+ clu.flags = dir.flags;
+
+ if (clu.flags == 0x03) {
+ clu.dir += clu_offset;
+ clu.size -= clu_offset;
+ } else {
+ /* hint_information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu.dir = fid->hint_last_clu;
+ }
+
+ while (clu_offset > 0) {
+ /* clu.dir = FAT_read(sb, clu.dir); */
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+ }
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ i = dentry % dentries_per_clu;
+ else
+ i = dentry & (dentries_per_clu-1);
+
+ for ( ; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ type = fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ buf_lock(sb, sector);
+ dir_entry->Attr = fs_func->get_entry_attr(ep);
+
+ fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ dir_entry->CreateTimestamp.Year = tm.year;
+ dir_entry->CreateTimestamp.Month = tm.mon;
+ dir_entry->CreateTimestamp.Day = tm.day;
+ dir_entry->CreateTimestamp.Hour = tm.hour;
+ dir_entry->CreateTimestamp.Minute = tm.min;
+ dir_entry->CreateTimestamp.Second = tm.sec;
+ dir_entry->CreateTimestamp.MilliSecond = 0;
+
+ fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ dir_entry->ModifyTimestamp.Year = tm.year;
+ dir_entry->ModifyTimestamp.Month = tm.mon;
+ dir_entry->ModifyTimestamp.Day = tm.day;
+ dir_entry->ModifyTimestamp.Hour = tm.hour;
+ dir_entry->ModifyTimestamp.Minute = tm.min;
+ dir_entry->ModifyTimestamp.Second = tm.sec;
+ dir_entry->ModifyTimestamp.MilliSecond = 0;
+
+ memset((char *)&dir_entry->AccessTimestamp, 0,
+ sizeof(struct date_time_t));
+
+ *(uni_name.name) = 0x0;
+ fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
+ uni_name.name);
+ if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
+ get_uni_name_from_dos_entry(sb,
+ (struct dos_dentry_t *)ep,
+ &uni_name, 0x1);
+ nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
+ buf_unlock(sb, sector);
+
+ if (p_fs->vol_type == EXFAT) {
+ ep = get_entry_in_dir(sb, &clu, i+1, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ } else {
+ get_uni_name_from_dos_entry(sb,
+ (struct dos_dentry_t *)ep,
+ &uni_name, 0x0);
+ nls_uniname_to_cstring(sb, dir_entry->ShortName,
+ &uni_name);
+ }
+
+ dir_entry->Size = fs_func->get_entry_size(ep);
+
+ /* hint information */
+ if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */
+ } else {
+ fid->hint_last_off = dentry >>
+ dentries_per_clu_bits;
+ fid->hint_last_clu = clu.dir;
+ }
+
+ fid->rwoffset = (s64) ++dentry;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ if (dir.dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ /* clu.dir = FAT_read(sb, clu.dir); */
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+ }
+
+ *(dir_entry->Name) = '\0';
+
+ fid->rwoffset = (s64) ++dentry;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
+{
+ s32 dentry;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir, clu_to_free;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of the given file id */
+ if (fid == NULL)
+ return FFS_INVALIDFID;
+
+ dir.dir = fid->dir.dir;
+ dir.size = fid->dir.size;
+ dir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ /* check if the file is "." or ".." */
+ if (p_fs->vol_type != EXFAT) {
+ if ((dir.dir != p_fs->root_dir) && (dentry < 2))
+ return FFS_PERMISSIONERR;
+ }
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ clu_to_free.dir = fid->start_clu;
+ clu_to_free.size = (s32)((fid->size-1) >> p_fs->cluster_size_bits) + 1;
+ clu_to_free.flags = fid->flags;
+
+ if (!is_dir_empty(sb, &clu_to_free)) {
+ ret = FFS_FILEEXIST;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* (1) update the directory entry */
+ remove_file(inode, &dir, dentry);
+
+ /* (2) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu_to_free, 1);
+
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+/*======================================================================*/
+/* Directory Entry Operations */
+/*======================================================================*/
+
+static int exfat_readdir(struct file *filp, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(filp);
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct dir_entry_t de;
+ unsigned long inum;
+ loff_t cpos;
+ int err = 0;
+
+ __lock_super(sb);
+
+ cpos = ctx->pos;
+ /* Fake . and .. for the root directory. */
+ if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) {
+ while (cpos < 2) {
+ if (inode->i_ino == EXFAT_ROOT_INO)
+ inum = EXFAT_ROOT_INO;
+ else if (cpos == 0)
+ inum = inode->i_ino;
+ else /* (cpos == 1) */
+ inum = parent_ino(filp->f_path.dentry);
+
+ if (!dir_emit_dots(filp, ctx))
+ goto out;
+ cpos++;
+ ctx->pos++;
+ }
+ if (cpos == 2)
+ cpos = 0;
+ }
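+ /*
+ * exFAT directories (and the FAT root) carry no "." / ".." entries on
+ * disk, so they are synthesized above; once both are emitted the real
+ * stream restarts at offset 0, and every later position must stay
+ * dentry-aligned.
+ */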
+ if (cpos & (DENTRY_SIZE - 1)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+get_new:
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS;
+
+ err = ffsReadDir(inode, &de);
+ if (err) {
+ /* at least we tried to read a sector
+ * move cpos to next sector position (should be aligned)
+ */
+ if (err == FFS_MEDIAERR) {
+ cpos += 1 << p_bd->sector_size_bits;
+ cpos &= ~((1 << p_bd->sector_size_bits)-1);
+ }
+
+ err = -EIO;
+ goto end_of_dir;
+ }
+
+ cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS;
+
+ if (!de.Name[0])
+ goto end_of_dir;
+
+ if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+ inum = inode->i_ino;
+ } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
+ inum = parent_ino(filp->f_path.dentry);
+ } else {
+ loff_t i_pos = ((loff_t) EXFAT_I(inode)->fid.start_clu << 32) |
+ ((EXFAT_I(inode)->fid.rwoffset-1) & 0xffffffff);
+ struct inode *tmp = exfat_iget(sb, i_pos);
+
+ if (tmp) {
+ inum = tmp->i_ino;
+ iput(tmp);
+ } else {
+ inum = iunique(sb, EXFAT_ROOT_INO);
+ }
+ }
+
+ if (!dir_emit(ctx, de.Name, strlen(de.Name), inum,
+ (de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+ goto out;
+
+ ctx->pos = cpos;
+ goto get_new;
+
+end_of_dir:
+ ctx->pos = cpos;
+out:
+ __unlock_super(sb);
+ return err;
+}
+
+static int exfat_ioctl_volume_id(struct inode *dir)
+{
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+
+ return p_fs->vol_id;
+}
+
+static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ unsigned int flags;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ switch (cmd) {
+ case EXFAT_IOCTL_GET_VOLUME_ID:
+ return exfat_ioctl_volume_id(inode);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ case EXFAT_IOC_GET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ flags = sbi->debug_flags;
+ return put_user(flags, (int __user *)arg);
+ }
+ case EXFAT_IOC_SET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (int __user *) arg))
+ return -EFAULT;
+
+ __lock_super(sb);
+ sbi->debug_flags = flags;
+ __unlock_super(sb);
+
+ return 0;
+ }
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+ default:
+ return -ENOTTY; /* Inappropriate ioctl for device */
+ }
+}
+
+static const struct file_operations exfat_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate = exfat_readdir,
+ .unlocked_ioctl = exfat_generic_ioctl,
+ .fsync = generic_file_fsync,
+};
+
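+/*
+ * Create a regular file: allocate the on-disk entry with ffsCreateFile(),
+ * then build the VFS inode and instantiate the dentry.
+ */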
+static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateFile(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else if (err == FFS_NAMETOOLONG)
+ err = -ENAMETOOLONG;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void) exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /*
+ * timestamp is already written, so mark_inode_dirty() is unnecessary.
+ */
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_find(struct inode *dir, struct qstr *qname,
+ struct file_id_t *fid)
+{
+ int err;
+
+ if (qname->len == 0)
+ return -ENOENT;
+
+ err = ffsLookupFile(dir, (u8 *) qname->name, fid);
+ if (err)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int exfat_d_anon_disconn(struct dentry *dentry)
+{
+ return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
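+/*
+ * Look up a name in @dir and splice its inode into the dcache; a symlink's
+ * target is read and cached here so exfat_get_link() can return it later.
+ */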
+static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct dentry *alias;
+ int err;
+ struct file_id_t fid;
+ loff_t i_pos;
+ u64 ret;
+ mode_t i_mode;
+
+ __lock_super(sb);
+ pr_debug("%s entered\n", __func__);
+ err = exfat_find(dir, &dentry->d_name, &fid);
+ if (err) {
+ if (err == -ENOENT) {
+ inode = NULL;
+ goto out;
+ }
+ goto error;
+ }
+
+ i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+
+ i_mode = inode->i_mode;
+ if (S_ISLNK(i_mode) && !EXFAT_I(inode)->target) {
+ EXFAT_I(inode)->target = kmalloc(i_size_read(inode) + 1,
+ GFP_KERNEL);
+ if (!EXFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto error;
+ }
+ ffsReadFile(dir, &fid, EXFAT_I(inode)->target,
+ i_size_read(inode), &ret);
+ *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0';
+ }
+
+ alias = d_find_alias(inode);
+ if (alias && !exfat_d_anon_disconn(alias)) {
+ BUG_ON(d_unhashed(alias));
+ if (!S_ISDIR(i_mode))
+ d_move(alias, dentry);
+ iput(inode);
+ __unlock_super(sb);
+ pr_debug("%s exited 1\n", __func__);
+ return alias;
+ }
+ dput(alias);
+out:
+ __unlock_super(sb);
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ dentry = d_splice_alias(inode, dentry);
+ if (dentry)
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ pr_debug("%s exited 2\n", __func__);
+ return dentry;
+
+error:
+ __unlock_super(sb);
+ pr_debug("%s exited 3\n", __func__);
+ return ERR_PTR(err);
+}
+
+static inline unsigned long exfat_hash(loff_t i_pos)
+{
+ return hash_32(i_pos, EXFAT_HASH_BITS);
+}
+
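+/*
+ * Inodes are kept in a per-superblock hash keyed by their on-disk position
+ * (i_pos) so exfat_iget() can find them without scanning the directory.
+ */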
+static void exfat_attach(struct inode *inode, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+
+ spin_lock(&sbi->inode_hash_lock);
+ EXFAT_I(inode)->i_pos = i_pos;
+ hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+static void exfat_detach(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
+ EXFAT_I(inode)->i_pos = 0;
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
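+/*
+ * Unlink a regular file: remove the on-disk entry with ffsRemoveFile(),
+ * then drop the inode's link count and detach it from the inode hash.
+ */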
+static int exfat_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
+ if (err) {
+ if (err == FFS_PERMISSIONERR)
+ err = -EPERM;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void) exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_detach(inode);
+ remove_inode_hash(inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_symlink(struct inode *dir, struct dentry *dentry,
+ const char *target)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+ u64 len = (u64) strlen(target);
+ u64 ret;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateFile(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+
+ err = ffsWriteFile(dir, &fid, (char *) target, len, &ret);
+
+ if (err) {
+ ffsRemoveFile(dir, &fid);
+
+ if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void) exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ EXFAT_I(inode)->target = kmalloc(len+1, GFP_KERNEL);
+ if (!EXFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto out;
+ }
+ memcpy(EXFAT_I(inode)->target, target, len+1);
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateDir(dir, (u8 *) dentry->d_name.name, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else if (err == FFS_NAMETOOLONG)
+ err = -ENAMETOOLONG;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void) exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ inc_nlink(dir);
+
+ i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -ENOTEMPTY;
+ else if (err == FFS_NOTFOUND)
+ err = -ENOENT;
+ else if (err == FFS_DIRBUSY)
+ err = -EBUSY;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void) exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ drop_nlink(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_detach(inode);
+ remove_inode_hash(inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
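+/*
+ * Rename: move the on-disk entry with ffsMoveFile(), re-hash the inode
+ * under its new position, and fix up link counts and timestamps on both
+ * parent directories.
+ */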
+static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *old_inode, *new_inode;
+ struct super_block *sb = old_dir->i_sb;
+ loff_t i_pos;
+ int err;
+
+ if (flags)
+ return -EINVAL;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
+
+ EXFAT_I(old_inode)->fid.size = i_size_read(old_inode);
+
+ err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
+ new_dentry);
+ if (err) {
+ if (err == FFS_PERMISSIONERR)
+ err = -EPERM;
+ else if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_NOTFOUND)
+ err = -ENOENT;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(new_dir);
+ new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
+ current_time(new_dir);
+ if (IS_DIRSYNC(new_dir))
+ (void) exfat_sync_inode(new_dir);
+ else
+ mark_inode_dirty(new_dir);
+
+ i_pos = ((loff_t) EXFAT_I(old_inode)->fid.dir.dir << 32) |
+ (EXFAT_I(old_inode)->fid.entry & 0xffffffff);
+
+ exfat_detach(old_inode);
+ exfat_attach(old_inode, i_pos);
+ if (IS_DIRSYNC(new_dir))
+ (void) exfat_sync_inode(old_inode);
+ else
+ mark_inode_dirty(old_inode);
+
+ if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) {
+ drop_nlink(old_dir);
+ if (!new_inode)
+ inc_nlink(new_dir);
+ }
+ INC_IVERSION(old_dir);
+ old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ if (IS_DIRSYNC(old_dir))
+ (void) exfat_sync_inode(old_dir);
+ else
+ mark_inode_dirty(old_dir);
+
+ if (new_inode) {
+ exfat_detach(new_inode);
+ drop_nlink(new_inode);
+ if (S_ISDIR(new_inode->i_mode))
+ drop_nlink(new_inode);
+ new_inode->i_ctime = current_time(new_inode);
+ }
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ int err, err2;
+
+ err = generic_cont_expand_simple(inode, size);
+ if (err != 0)
+ return err;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
+
+ if (IS_SYNC(inode)) {
+ err = filemap_fdatawrite_range(mapping, start,
+ start + count - 1);
+ err2 = sync_mapping_buffers(mapping);
+ err = (err) ? (err) : (err2);
+ err2 = write_inode_now(inode, 1);
+ err = (err) ? (err) : (err2);
+ if (!err)
+ err = filemap_fdatawait_range(mapping, start,
+ start + count - 1);
+ }
+ return err;
+}
+
+static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
+{
+ mode_t allow_utime = sbi->options.allow_utime;
+
+ if (!uid_eq(current_fsuid(), inode->i_uid)) {
+ if (in_group_p(inode->i_gid))
+ allow_utime >>= 3;
+ if (allow_utime & MAY_WRITE)
+ return 1;
+ }
+
+ /* use a default check */
+ return 0;
+}
+
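+/*
+ * Restrict chmod to what the on-disk attributes can express: the masked
+ * r/x bits must stay as they are, and the w bits may only be set or
+ * cleared all together.
+ */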
+static int exfat_sanitize_mode(const struct exfat_sb_info *sbi,
+ struct inode *inode, umode_t *mode_ptr)
+{
+ mode_t i_mode, mask, perm;
+
+ i_mode = inode->i_mode;
+
+ if (S_ISREG(i_mode) || S_ISLNK(i_mode))
+ mask = sbi->options.fs_fmask;
+ else
+ mask = sbi->options.fs_dmask;
+
+ perm = *mode_ptr & ~(S_IFMT | mask);
+
+ /* Of the r and x bits, all (subject to umask) must be present. */
+ if ((perm & 0555) != (i_mode & 0555))
+ return -EPERM;
+
+ if (exfat_mode_can_hold_ro(inode)) {
+ /*
+ * Of the w bits, either all (subject to umask) or none must be
+ * present.
+ */
+ if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask)))
+ return -EPERM;
+ } else {
+ /*
+ * If exfat_mode_can_hold_ro(inode) is false, can't change w
+ * bits.
+ */
+ if ((perm & 0222) != (0222 & ~mask))
+ return -EPERM;
+ }
+
+ *mode_ptr &= S_IFMT | perm;
+
+ return 0;
+}
+
+static void exfat_truncate(struct inode *inode, loff_t old_size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ int err;
+
+ __lock_super(sb);
+
+ /*
+ * This protects against truncating a file that was bigger than
+ * it is now and then trying to write into the hole.
+ */
+ if (EXFAT_I(inode)->mmu_private > i_size_read(inode))
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+
+ if (EXFAT_I(inode)->fid.start_clu == 0)
+ goto out;
+
+ err = ffsTruncateFile(inode, old_size, i_size_read(inode));
+ if (err)
+ goto out;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ if (IS_DIRSYNC(inode))
+ (void) exfat_sync_inode(inode);
+ else
+ mark_inode_dirty(inode);
+
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) &
+ ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+out:
+ __unlock_super(sb);
+}
+
+static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ unsigned int ia_valid;
+ int error;
+ loff_t old_size;
+
+ pr_debug("%s entered\n", __func__);
+
+ if ((attr->ia_valid & ATTR_SIZE)
+ && (attr->ia_size > i_size_read(inode))) {
+ error = exfat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ return error;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+
+ ia_valid = attr->ia_valid;
+
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET))
+ && exfat_allow_set_time(sbi, inode)) {
+ attr->ia_valid &= ~(ATTR_MTIME_SET |
+ ATTR_ATIME_SET |
+ ATTR_TIMES_SET);
+ }
+
+ error = setattr_prepare(dentry, attr);
+ attr->ia_valid = ia_valid;
+ if (error)
+ return error;
+
+ if (((attr->ia_valid & ATTR_UID) &&
+ (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
+ ((attr->ia_valid & ATTR_MODE) &&
+ (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
+ return -EPERM;
+ }
+
+ /*
+ * We don't return -EPERM here. Yes, strange, but this behavior
+ * is too old to change now.
+ */
+ if (attr->ia_valid & ATTR_MODE) {
+ if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
+ attr->ia_valid &= ~ATTR_MODE;
+ }
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ old_size = i_size_read(inode);
+ down_write(&EXFAT_I(inode)->truncate_lock);
+ truncate_setsize(inode, attr->ia_size);
+ exfat_truncate(inode, old_size);
+ up_write(&EXFAT_I(inode)->truncate_lock);
+ }
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ pr_debug("%s exited\n", __func__);
+ return error;
+}
+
+static int exfat_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+{
+ struct inode *inode = path->dentry->d_inode;
+
+ pr_debug("%s entered\n", __func__);
+
+ generic_fillattr(inode, stat);
+ stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size;
+
+ pr_debug("%s exited\n", __func__);
+ return 0;
+}
+
+static const struct inode_operations exfat_dir_inode_operations = {
+ .create = exfat_create,
+ .lookup = exfat_lookup,
+ .unlink = exfat_unlink,
+ .symlink = exfat_symlink,
+ .mkdir = exfat_mkdir,
+ .rmdir = exfat_rmdir,
+ .rename = exfat_rename,
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
+
+/*======================================================================*/
+/* File Operations */
+/*======================================================================*/
+static const char *exfat_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ /* The symlink target was read and cached at lookup time. */
+ return ei->target;
+}
+
+static const struct inode_operations exfat_symlink_inode_operations = {
+ .get_link = exfat_get_link,
+};
+
+static int exfat_file_release(struct inode *inode, struct file *filp)
+{
+ struct super_block *sb = inode->i_sb;
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ ffsSyncVol(sb, false);
+ return 0;
+}
+
+static const struct file_operations exfat_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .release = exfat_file_release,
+ .unlocked_ioctl = exfat_generic_ioctl,
+ .fsync = generic_file_fsync,
+ .splice_read = generic_file_splice_read,
+};
+
+static const struct inode_operations exfat_file_inode_operations = {
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
+
+/*======================================================================*/
+/* Address Space Operations */
+/*======================================================================*/
+
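+/*
+ * Map a file-relative sector to a device sector; sectors beyond EOF are
+ * mapped (allocating clusters as needed) only when *create is set.
+ */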
+static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+ unsigned long *mapped_blocks, int *create)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct bd_info_t *p_bd = &(sbi->bd_info);
+ const unsigned long blocksize = sb->s_blocksize;
+ const unsigned char blocksize_bits = sb->s_blocksize_bits;
+ sector_t last_block;
+ int err, clu_offset, sec_offset;
+ unsigned int cluster;
+
+ *phys = 0;
+ *mapped_blocks = 0;
+
+ if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) {
+ if (inode->i_ino == EXFAT_ROOT_INO) {
+ if (sector <
+ (p_fs->dentries_in_root >>
+ (p_bd->sector_size_bits-DENTRY_SIZE_BITS))) {
+ *phys = sector + p_fs->root_start_sector;
+ *mapped_blocks = 1;
+ }
+ return 0;
+ }
+ }
+
+ last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
+ if (sector >= last_block) {
+ if (*create == 0)
+ return 0;
+ } else {
+ *create = 0;
+ }
+
+ /* cluster offset */
+ clu_offset = sector >> p_fs->sectors_per_clu_bits;
+
+ /* sector offset in cluster */
+ sec_offset = sector & (p_fs->sectors_per_clu - 1);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsMapCluster(inode, clu_offset, &cluster);
+
+ if (err) {
+ if (err == FFS_FULL)
+ return -ENOSPC;
+ else
+ return -EIO;
+ } else if (cluster != CLUSTER_32(~0)) {
+ *phys = START_SECTOR(cluster) + sec_offset;
+ *mapped_blocks = p_fs->sectors_per_clu - sec_offset;
+ }
+
+ return 0;
+}
+
+static int exfat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int err;
+ unsigned long mapped_blocks;
+ sector_t phys;
+
+ __lock_super(sb);
+
+ err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create);
+ if (err) {
+ __unlock_super(sb);
+ return err;
+ }
+
+ if (phys) {
+ max_blocks = min(mapped_blocks, max_blocks);
+ if (create) {
+ EXFAT_I(inode)->mmu_private += max_blocks <<
+ sb->s_blocksize_bits;
+ set_buffer_new(bh_result);
+ }
+ map_bh(bh_result, sb, phys);
+ }
+
+ bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+ __unlock_super(sb);
+
+ return 0;
+}
+
+static int exfat_readpage(struct file *file, struct page *page)
+{
+ return mpage_readpage(page, exfat_get_block);
+}
+
+static int exfat_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
+}
+
+static int exfat_writepage(struct page *page, struct writeback_control *wbc)
+{
+ return block_write_full_page(page, exfat_get_block, wbc);
+}
+
+static int exfat_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return mpage_writepages(mapping, wbc, exfat_get_block);
+}
+
+static void exfat_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > i_size_read(inode)) {
+ truncate_pagecache(inode, i_size_read(inode));
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ exfat_truncate(inode, i_size_read(inode));
+ }
+}
+
+static int exfat_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int flags,
+ struct page **pagep, void **fsdata)
+{
+ int ret;
+
+ *pagep = NULL;
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ exfat_get_block,
+ &EXFAT_I(mapping->host)->mmu_private);
+
+ if (ret < 0)
+ exfat_write_failed(mapping, pos+len);
+ return ret;
+}
+
+static int exfat_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *pagep, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ int err;
+
+ err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+
+ if (err < len)
+ exfat_write_failed(mapping, pos+len);
+
+ if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ fid->attr |= ATTR_ARCHIVE;
+ mark_inode_dirty(inode);
+ }
+ return err;
+}
+
+static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ ssize_t ret;
+ int rw;
+
+ rw = iov_iter_rw(iter);
+
+ if (rw == WRITE) {
+ if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter))
+ return 0;
+ }
+ ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
+
+ if ((ret < 0) && (rw & WRITE))
+ exfat_write_failed(mapping, iov_iter_count(iter));
+ return ret;
+}
+
+static sector_t _exfat_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t blocknr;
+
+ /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */
+ down_read(&EXFAT_I(mapping->host)->truncate_lock);
+ blocknr = generic_block_bmap(mapping, block, exfat_get_block);
+ up_read(&EXFAT_I(mapping->host)->truncate_lock);
+
+ return blocknr;
+}
+
+static const struct address_space_operations exfat_aops = {
+ .readpage = exfat_readpage,
+ .readpages = exfat_readpages,
+ .writepage = exfat_writepage,
+ .writepages = exfat_writepages,
+ .write_begin = exfat_write_begin,
+ .write_end = exfat_write_end,
+ .direct_IO = exfat_direct_IO,
+ .bmap = _exfat_bmap
+};
+
+/*======================================================================*/
+/* Super Operations */
+/*======================================================================*/
+
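+/* Look up a cached inode by its on-disk position via the inode hash. */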
+static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *info;
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+ struct inode *inode = NULL;
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_for_each_entry(info, head, i_hash_fat) {
+ BUG_ON(info->vfs_inode.i_sb != sb);
+
+ if (i_pos != info->i_pos)
+ continue;
+ inode = igrab(&info->vfs_inode);
+ if (inode)
+ break;
+ }
+ spin_unlock(&sbi->inode_hash_lock);
+ return inode;
+}
+
+/* doesn't deal with root inode */
+static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct dir_entry_t info;
+
+ memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
+
+ ffsReadStat(inode, &info);
+
+ EXFAT_I(inode)->i_pos = 0;
+ EXFAT_I(inode)->target = NULL;
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ INC_IVERSION(inode);
+ inode->i_generation = get_seconds();
+
+ if (info.Attr & ATTR_SUBDIR) { /* directory */
+ inode->i_generation &= ~1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ set_nlink(inode, info.NumSubdirs);
+ } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */
+ inode->i_generation |= 1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_symlink_inode_operations;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ } else { /* regular file */
+ inode->i_generation |= 1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_file_inode_operations;
+ inode->i_fop = &exfat_file_operations;
+ inode->i_mapping->a_ops = &exfat_aops;
+ inode->i_mapping->nrpages = 0;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ }
+ exfat_save_attr(inode, info.Attr);
+
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
+ & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+
+ exfat_time_fat2unix(sbi, &inode->i_mtime, &info.ModifyTimestamp);
+ exfat_time_fat2unix(sbi, &inode->i_ctime, &info.CreateTimestamp);
+ exfat_time_fat2unix(sbi, &inode->i_atime, &info.AccessTimestamp);
+
+ return 0;
+}
+
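+/*
+ * Return the inode for @fid: reuse a hashed inode if one exists for
+ * @i_pos, otherwise allocate a new one and fill it from the on-disk data.
+ */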
+static struct inode *exfat_build_inode(struct super_block *sb,
+ struct file_id_t *fid, loff_t i_pos)
+{
+ struct inode *inode;
+ int err;
+
+ inode = exfat_iget(sb, i_pos);
+ if (inode)
+ goto out;
+ inode = new_inode(sb);
+ if (!inode) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+ SET_IVERSION(inode, 1);
+ err = exfat_fill_inode(inode, fid);
+ if (err) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+ exfat_attach(inode, i_pos);
+ insert_inode_hash(inode);
+out:
+ return inode;
+}
+
+static int exfat_sync_inode(struct inode *inode)
+{
+ return exfat_write_inode(inode, NULL);
+}
+
+static struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+ struct exfat_inode_info *ei;
+
+ ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS);
+ if (!ei)
+ return NULL;
+
+ init_rwsem(&ei->truncate_lock);
+
+ return &ei->vfs_inode;
+}
+
+static void exfat_destroy_inode(struct inode *inode)
+{
+ kfree(EXFAT_I(inode)->target);
+ EXFAT_I(inode)->target = NULL;
+
+ kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode));
+}
+
+static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct dir_entry_t info;
+
+ if (inode->i_ino == EXFAT_ROOT_INO)
+ return 0;
+
+ info.Attr = exfat_make_attr(inode);
+ info.Size = i_size_read(inode);
+
+ exfat_time_unix2fat(sbi, &inode->i_mtime, &info.ModifyTimestamp);
+ exfat_time_unix2fat(sbi, &inode->i_ctime, &info.CreateTimestamp);
+ exfat_time_unix2fat(sbi, &inode->i_atime, &info.AccessTimestamp);
+
+ ffsWriteStat(inode, &info);
+
+ return 0;
+}
+
+static void exfat_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (!inode->i_nlink)
+ i_size_write(inode, 0);
+ invalidate_inode_buffers(inode);
+ clear_inode(inode);
+ exfat_detach(inode);
+
+ remove_inode_hash(inode);
+}
+
+static void exfat_free_super(struct exfat_sb_info *sbi)
+{
+ if (sbi->nls_disk)
+ unload_nls(sbi->nls_disk);
+ if (sbi->nls_io)
+ unload_nls(sbi->nls_io);
+ if (sbi->options.iocharset != exfat_default_iocharset)
+ kfree(sbi->options.iocharset);
+ /* the matching mutex_init() is in exfat_fill_super() */
+ mutex_destroy(&sbi->s_lock);
+ kfree(sbi);
+}
+
+static void exfat_put_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (__is_sb_dirty(sb))
+ exfat_write_super(sb);
+
+ ffsUmountVol(sb);
+
+ sb->s_fs_info = NULL;
+ exfat_free_super(sbi);
+}
+
+static void exfat_write_super(struct super_block *sb)
+{
+ __lock_super(sb);
+
+ __set_sb_clean(sb);
+
+ if (!sb_rdonly(sb))
+ ffsSyncVol(sb, true);
+
+ __unlock_super(sb);
+}
+
+static int exfat_sync_fs(struct super_block *sb, int wait)
+{
+ int err = 0;
+
+ if (__is_sb_dirty(sb)) {
+ __lock_super(sb);
+ __set_sb_clean(sb);
+ err = ffsSyncVol(sb, true);
+ __unlock_super(sb);
+ }
+
+ return err;
+}
+
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct vol_info_t info;
+
+ if (p_fs->used_clusters == (u32) ~0) {
+ if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR)
+ return -EIO;
+
+ } else {
+ info.FatType = p_fs->vol_type;
+ info.ClusterSize = p_fs->cluster_size;
+ info.NumClusters = p_fs->num_clusters - 2;
+ info.UsedClusters = p_fs->used_clusters;
+ info.FreeClusters = info.NumClusters - info.UsedClusters;
+
+ if (p_fs->dev_ejected)
+ pr_info("[EXFAT] statfs on device that is ejected\n");
+ }
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = info.ClusterSize;
+ buf->f_blocks = info.NumClusters;
+ buf->f_bfree = info.FreeClusters;
+ buf->f_bavail = info.FreeClusters;
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ buf->f_namelen = 260;
+
+ return 0;
+}
+
+static int exfat_remount(struct super_block *sb, int *flags, char *data)
+{
+ *flags |= SB_NODIRATIME;
+ return 0;
+}
+
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (__kuid_val(opts->fs_uid))
+ seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid));
+ if (__kgid_val(opts->fs_gid))
+ seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid));
+ seq_printf(m, ",fmask=%04o", opts->fs_fmask);
+ seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+ if (opts->allow_utime)
+ seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
+ if (sbi->nls_disk)
+ seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
+ if (sbi->nls_io)
+ seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
+ seq_printf(m, ",namecase=%u", opts->casesensitive);
+ if (opts->errors == EXFAT_ERRORS_CONT)
+ seq_puts(m, ",errors=continue");
+ else if (opts->errors == EXFAT_ERRORS_PANIC)
+ seq_puts(m, ",errors=panic");
+ else
+ seq_puts(m, ",errors=remount-ro");
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard)
+ seq_puts(m, ",discard");
+#endif
+ return 0;
+}
+
+static const struct super_operations exfat_sops = {
+ .alloc_inode = exfat_alloc_inode,
+ .destroy_inode = exfat_destroy_inode,
+ .write_inode = exfat_write_inode,
+ .evict_inode = exfat_evict_inode,
+ .put_super = exfat_put_super,
+ .sync_fs = exfat_sync_fs,
+ .statfs = exfat_statfs,
+ .remount_fs = exfat_remount,
+ .show_options = exfat_show_options,
+};
+
+/*======================================================================*/
+/* Export Operations */
+/*======================================================================*/
+
+static struct inode *exfat_nfs_get_inode(struct super_block *sb, u64 ino,
+ u32 generation)
+{
+ struct inode *inode = NULL;
+
+ if (ino < EXFAT_ROOT_INO)
+ return inode;
+ inode = ilookup(sb, ino);
+
+ if (inode && generation && (inode->i_generation != generation)) {
+ iput(inode);
+ inode = NULL;
+ }
+
+ return inode;
+}
+
+static struct dentry *exfat_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ exfat_nfs_get_inode);
+}
+
+static struct dentry *exfat_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ exfat_nfs_get_inode);
+}
+
+static const struct export_operations exfat_export_ops = {
+ .fh_to_dentry = exfat_fh_to_dentry,
+ .fh_to_parent = exfat_fh_to_parent,
+};
+
+/*======================================================================*/
+/* Super Block Read Operations */
+/*======================================================================*/
+
+enum {
+ Opt_uid,
+ Opt_gid,
+ Opt_umask,
+ Opt_dmask,
+ Opt_fmask,
+ Opt_allow_utime,
+ Opt_codepage,
+ Opt_charset,
+ Opt_namecase,
+ Opt_debug,
+ Opt_err_cont,
+ Opt_err_panic,
+ Opt_err_ro,
+ Opt_utf8_hack,
+ Opt_err,
+#ifdef CONFIG_EXFAT_DISCARD
+ Opt_discard,
+#endif /* CONFIG_EXFAT_DISCARD */
+};
+
+static const match_table_t exfat_tokens = {
+ {Opt_uid, "uid=%u"},
+ {Opt_gid, "gid=%u"},
+ {Opt_umask, "umask=%o"},
+ {Opt_dmask, "dmask=%o"},
+ {Opt_fmask, "fmask=%o"},
+ {Opt_allow_utime, "allow_utime=%o"},
+ {Opt_codepage, "codepage=%u"},
+ {Opt_charset, "iocharset=%s"},
+ {Opt_namecase, "namecase=%u"},
+ {Opt_debug, "debug"},
+ {Opt_err_cont, "errors=continue"},
+ {Opt_err_panic, "errors=panic"},
+ {Opt_err_ro, "errors=remount-ro"},
+ {Opt_utf8_hack, "utf8"},
+#ifdef CONFIG_EXFAT_DISCARD
+ {Opt_discard, "discard"},
+#endif /* CONFIG_EXFAT_DISCARD */
+ {Opt_err, NULL}
+};
+
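+/*
+ * Parse mount options into @opts; defaults are taken from the mounting
+ * task's uid, gid and umask.
+ */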
+static int parse_options(char *options, int silent, int *debug,
+ struct exfat_mount_options *opts)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ char *iocharset;
+
+ opts->fs_uid = current_uid();
+ opts->fs_gid = current_gid();
+ opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->allow_utime = (unsigned short) -1;
+ opts->codepage = exfat_default_codepage;
+ opts->iocharset = exfat_default_iocharset;
+ opts->casesensitive = 0;
+ opts->errors = EXFAT_ERRORS_RO;
+#ifdef CONFIG_EXFAT_DISCARD
+ opts->discard = 0;
+#endif
+ *debug = 0;
+
+ if (!options)
+ goto out;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, exfat_tokens, args);
+ switch (token) {
+ case Opt_uid:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->fs_uid = KUIDT_INIT(option);
+ break;
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->fs_gid = KGIDT_INIT(option);
+ break;
+ case Opt_umask:
+ case Opt_dmask:
+ case Opt_fmask:
+ if (match_octal(&args[0], &option))
+ return 0;
+ if (token != Opt_dmask)
+ opts->fs_fmask = option;
+ if (token != Opt_fmask)
+ opts->fs_dmask = option;
+ break;
+ case Opt_allow_utime:
+ if (match_octal(&args[0], &option))
+ return 0;
+ opts->allow_utime = option & 0022;
+ break;
+ case Opt_codepage:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->codepage = option;
+ break;
+ case Opt_charset:
+ if (opts->iocharset != exfat_default_iocharset)
+ kfree(opts->iocharset);
+ iocharset = match_strdup(&args[0]);
+ if (!iocharset)
+ return -ENOMEM;
+ opts->iocharset = iocharset;
+ break;
+ case Opt_namecase:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->casesensitive = option;
+ break;
+ case Opt_err_cont:
+ opts->errors = EXFAT_ERRORS_CONT;
+ break;
+ case Opt_err_panic:
+ opts->errors = EXFAT_ERRORS_PANIC;
+ break;
+ case Opt_err_ro:
+ opts->errors = EXFAT_ERRORS_RO;
+ break;
+ case Opt_debug:
+ *debug = 1;
+ break;
+#ifdef CONFIG_EXFAT_DISCARD
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+#endif /* CONFIG_EXFAT_DISCARD */
+ case Opt_utf8_hack:
+ break;
+ default:
+ if (!silent)
+ pr_err("[EXFAT] Unrecognized mount option %s or missing value\n",
+ p);
+ return -EINVAL;
+ }
+ }
+
+out:
+ if (opts->allow_utime == (unsigned short) -1)
+ opts->allow_utime = ~opts->fs_dmask & 0022;
+
+ return 0;
+}
+
+static void exfat_hash_init(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int i;
+
+ spin_lock_init(&sbi->inode_hash_lock);
+ for (i = 0; i < EXFAT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+}
+
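+/*
+ * Set up the root directory inode from the filesystem info;
+ * exfat_fill_inode() does not handle the root.
+ */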
+static int exfat_read_root(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct dir_entry_t info;
+
+ EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
+ EXFAT_I(inode)->fid.dir.flags = 0x01;
+ EXFAT_I(inode)->fid.entry = -1;
+ EXFAT_I(inode)->fid.start_clu = p_fs->root_dir;
+ EXFAT_I(inode)->fid.flags = 0x01;
+ EXFAT_I(inode)->fid.type = TYPE_DIR;
+ EXFAT_I(inode)->fid.rwoffset = 0;
+ EXFAT_I(inode)->fid.hint_last_off = -1;
+
+ EXFAT_I(inode)->target = NULL;
+
+ ffsReadStat(inode, &info);
+
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ INC_IVERSION(inode);
+ inode->i_generation = 0;
+ inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ i_size_write(inode, info.Size);
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
+ & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+ EXFAT_I(inode)->i_pos = ((loff_t) p_fs->root_dir << 32) | 0xffffffff;
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+
+ exfat_save_attr(inode, ATTR_SUBDIR);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ set_nlink(inode, info.NumSubdirs + 2);
+
+ return 0;
+}
+
+static void setup_dops(struct super_block *sb)
+{
+ if (EXFAT_SB(sb)->options.casesensitive == 0)
+ sb->s_d_op = &exfat_ci_dentry_ops;
+ else
+ sb->s_d_op = &exfat_dentry_ops;
+}
+
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode = NULL;
+ struct exfat_sb_info *sbi;
+ int debug, ret;
+ long error;
+ char buf[50];
+
+ /*
+ * GFP_KERNEL is ok here, because while we do hold the
+ * superblock lock, memory pressure can't call back into
+ * the filesystem, since we're only just about to mount
+ * it and have no inodes etc active!
+ */
+ sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+ mutex_init(&sbi->s_lock);
+ sb->s_fs_info = sbi;
+ sb->s_flags |= SB_NODIRATIME;
+ sb->s_magic = EXFAT_SUPER_MAGIC;
+ sb->s_op = &exfat_sops;
+ sb->s_export_op = &exfat_export_ops;
+
+ error = parse_options(data, silent, &debug, &sbi->options);
+ if (error)
+ goto out_fail;
+
+ setup_dops(sb);
+
+ error = -EIO;
+ sb_min_blocksize(sb, 512);
+ sb->s_maxbytes = 0x7fffffffffffffffLL; /* maximum file size */
+
+ ret = ffsMountVol(sb);
+ if (ret) {
+ if (!silent)
+ pr_err("[EXFAT] ffsMountVol failed\n");
+
+ goto out_fail;
+ }
+
+ /* set up enough so that it can read an inode */
+ exfat_hash_init(sb);
+
+ /*
+ * The low byte of the FAT's first entry must have the same value
+ * as the media field. But in the real world too many devices
+ * write the wrong value, so that validity check was removed.
+ *
+ * if (FAT_FIRST_ENT(sb, media) != first)
+ */
+
+ /* codepage is not meaningful in exfat */
+ if (sbi->fs_info.vol_type != EXFAT) {
+ error = -EINVAL;
+ sprintf(buf, "cp%d", sbi->options.codepage);
+ sbi->nls_disk = load_nls(buf);
+ if (!sbi->nls_disk) {
+ pr_err("[EXFAT] Codepage %s not found\n", buf);
+ goto out_fail2;
+ }
+ }
+
+ sbi->nls_io = load_nls(sbi->options.iocharset);
+
+ error = -ENOMEM;
+ root_inode = new_inode(sb);
+ if (!root_inode)
+ goto out_fail2;
+ root_inode->i_ino = EXFAT_ROOT_INO;
+ SET_IVERSION(root_inode, 1);
+
+ error = exfat_read_root(root_inode);
+ if (error < 0)
+ goto out_fail2;
+ error = -ENOMEM;
+ exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos);
+ insert_inode_hash(root_inode);
+ sb->s_root = d_make_root(root_inode);
+ if (!sb->s_root) {
+ pr_err("[EXFAT] Getting the root inode failed\n");
+ goto out_fail2;
+ }
+
+ return 0;
+
+out_fail2:
+ ffsUmountVol(sb);
+out_fail:
+ if (root_inode)
+ iput(root_inode);
+ sb->s_fs_info = NULL;
+ exfat_free_super(sbi);
+ return error;
+}
+
+static struct dentry *exfat_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super);
+}
+
+static void init_once(void *foo)
+{
+ struct exfat_inode_info *ei = (struct exfat_inode_info *)foo;
+
+ INIT_HLIST_NODE(&ei->i_hash_fat);
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int __init exfat_init_inodecache(void)
+{
+ exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
+ sizeof(struct exfat_inode_info),
+ 0,
+ (SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (exfat_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit exfat_destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(exfat_inode_cachep);
+}
+
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+static void exfat_debug_kill_sb(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct block_device *bdev = sb->s_bdev;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ long flags;
+
+ if (sbi) {
+ flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) {
+ /*
+ * invalidate_bdev() drops all device cache, including
+ * dirty data. We use this to simulate device removal.
+ */
+ down(&p_fs->v_sem);
+ FAT_release_all(sb);
+ buf_release_all(sb);
+ up(&p_fs->v_sem);
+
+ invalidate_bdev(bdev);
+ }
+ }
+
+ kill_block_super(sb);
+}
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+static struct file_system_type exfat_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "exfat",
+ .mount = exfat_fs_mount,
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ .kill_sb = exfat_debug_kill_sb,
+#else
+ .kill_sb = kill_block_super,
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static int __init init_exfat(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct dos_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct ext_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct file_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct strm_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct name_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct bmap_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct case_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct volm_dentry_t) != DENTRY_SIZE);
+
+ pr_info("exFAT: Version %s\n", EXFAT_VERSION);
+
+ err = exfat_init_inodecache();
+ if (err)
+ return err;
+
+ err = register_filesystem(&exfat_fs_type);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void __exit exit_exfat(void)
+{
+ exfat_destroy_inodecache();
+ unregister_filesystem(&exfat_fs_type);
+}
+
+module_init(init_exfat);
+module_exit(exit_exfat);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("exFAT Filesystem Driver");
+MODULE_ALIAS_FS("exfat");
diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c
new file mode 100644
index 000000000000..366082fb3dab
--- /dev/null
+++ b/drivers/staging/exfat/exfat_upcase.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/types.h>
+#include "exfat.h"
+
+const u8 uni_upcase[NUM_UPCASE << 1] = {
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00,
+ 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00,
+ 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
+ 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
+ 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00,
+ 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00,
+ 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00,
+ 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00,
+ 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00,
+ 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00,
+ 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00,
+ 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00,
+ 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00,
+ 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00,
+ 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
+ 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
+ 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
+ 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00,
+ 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00,
+ 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
+ 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
+ 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
+ 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00,
+ 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00,
+ 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00,
+ 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00,
+ 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00,
+ 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00,
+ 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00,
+ 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00,
+ 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00,
+ 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00,
+ 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00,
+ 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00,
+ 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00,
+ 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00,
+ 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00,
+ 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00,
+ 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00,
+ 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
+ 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
+ 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
+ 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
+ 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
+ 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
+ 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
+ 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
+ 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01,
+ 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01,
+ 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01,
+ 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01,
+ 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01,
+ 0x14, 0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01,
+ 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01,
+ 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01,
+ 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01,
+ 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01,
+ 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01,
+ 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01,
+ 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01,
+ 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01,
+ 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01,
+ 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01,
+ 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01,
+ 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01,
+ 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01,
+ 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01,
+ 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01,
+ 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01,
+ 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01,
+ 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01,
+ 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01,
+ 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01,
+ 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01,
+ 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01,
+ 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01,
+ 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01,
+ 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01,
+ 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01,
+ 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01,
+ 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01,
+ 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01,
+ 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01,
+ 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01,
+ 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01,
+ 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01,
+ 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01,
+ 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01,
+ 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01,
+ 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01,
+ 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01,
+ 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01,
+ 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01,
+ 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01,
+ 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01,
+ 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01,
+ 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01,
+ 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01,
+ 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01,
+ 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01,
+ 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01,
+ 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01,
+ 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01,
+ 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01,
+ 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01,
+ 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01,
+ 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01,
+ 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01,
+ 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01,
+ 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01,
+ 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01,
+ 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02,
+ 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02,
+ 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02,
+ 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02,
+ 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02,
+ 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02,
+ 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02,
+ 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02,
+ 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02,
+ 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02,
+ 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02,
+ 0x30, 0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02,
+ 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02,
+ 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02,
+ 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02,
+ 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02,
+ 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02,
+ 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02,
+ 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02,
+ 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01,
+ 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01,
+ 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01,
+ 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02,
+ 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01,
+ 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02,
+ 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C,
+ 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01,
+ 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02,
+ 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02,
+ 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02,
+ 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02,
+ 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01,
+ 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02,
+ 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01,
+ 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02,
+ 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02,
+ 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02,
+ 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02,
+ 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02,
+ 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02,
+ 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02,
+ 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02,
+ 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02,
+ 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02,
+ 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02,
+ 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02,
+ 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02,
+ 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02,
+ 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02,
+ 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02,
+ 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02,
+ 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02,
+ 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02,
+ 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02,
+ 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02,
+ 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02,
+ 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02,
+ 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02,
+ 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02,
+ 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02,
+ 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02,
+ 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02,
+ 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02,
+ 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03,
+ 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03,
+ 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03,
+ 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03,
+ 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03,
+ 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03,
+ 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03,
+ 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03,
+ 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03,
+ 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03,
+ 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03,
+ 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03,
+ 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03,
+ 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03,
+ 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03,
+ 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03,
+ 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03,
+ 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03,
+ 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03,
+ 0x4C, 0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03,
+ 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03,
+ 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03,
+ 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03,
+ 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03,
+ 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03,
+ 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03,
+ 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03,
+ 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03,
+ 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03,
+ 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03,
+ 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03,
+ 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03,
+ 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03,
+ 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03,
+ 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03,
+ 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03,
+ 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
+ 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
+ 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03,
+ 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
+ 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03,
+ 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
+ 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
+ 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03,
+ 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
+ 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03,
+ 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03,
+ 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03,
+ 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03,
+ 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03,
+ 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03,
+ 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03,
+ 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03,
+ 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03,
+ 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03,
+ 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03,
+ 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03,
+ 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
+ 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
+ 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
+ 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
+ 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
+ 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
+ 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
+ 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
+ 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
+ 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
+ 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04,
+ 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04,
+ 0x68, 0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04,
+ 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04,
+ 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04,
+ 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04,
+ 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04,
+ 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04,
+ 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04,
+ 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04,
+ 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04,
+ 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04,
+ 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04,
+ 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04,
+ 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04,
+ 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04,
+ 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04,
+ 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04,
+ 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04,
+ 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04,
+ 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04,
+ 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04,
+ 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04,
+ 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04,
+ 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04,
+ 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04,
+ 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04,
+ 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04,
+ 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04,
+ 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04,
+ 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04,
+ 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04,
+ 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04,
+ 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04,
+ 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04,
+ 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04,
+ 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04,
+ 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04,
+ 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04,
+ 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04,
+ 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05,
+ 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05,
+ 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05,
+ 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05,
+ 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05,
+ 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05,
+ 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05,
+ 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05,
+ 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05,
+ 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05,
+ 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05,
+ 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05,
+ 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
+ 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
+ 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
+ 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
+ 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
+ 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05,
+ 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05,
+ 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05,
+ 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
+ 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
+ 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
+ 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
+ 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
+ 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF,
+ 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D,
+ 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D,
+ 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D,
+ 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D,
+ 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D,
+ 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D,
+ 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D,
+ 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D,
+ 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D,
+ 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D,
+ 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D,
+ 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D,
+ 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D,
+ 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D,
+ 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D,
+ 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D,
+ 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D,
+ 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D,
+ 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D,
+ 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D,
+ 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D,
+ 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D,
+ 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D,
+ 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D,
+ 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D,
+ 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D,
+ 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D,
+ 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D,
+ 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D,
+ 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D,
+ 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D,
+ 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D,
+ 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D,
+ 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E,
+ 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E,
+ 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E,
+ 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E,
+ 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E,
+ 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E,
+ 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E,
+ 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+ 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E,
+ 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E,
+ 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E,
+ 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E,
+ 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E,
+ 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E,
+ 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E,
+ 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E,
+ 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E,
+ 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E,
+ 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E,
+ 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E,
+ 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E,
+ 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E,
+ 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E,
+ 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E,
+ 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E,
+ 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E,
+ 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E,
+ 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E,
+ 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E,
+ 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E,
+ 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E,
+ 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E,
+ 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E,
+ 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E,
+ 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E,
+ 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E,
+ 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E,
+ 0x94, 0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E,
+ 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E,
+ 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E,
+ 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E,
+ 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E,
+ 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E,
+ 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E,
+ 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E,
+ 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E,
+ 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E,
+ 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E,
+ 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E,
+ 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E,
+ 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E,
+ 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E,
+ 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E,
+ 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E,
+ 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E,
+ 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E,
+ 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E,
+ 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E,
+ 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E,
+ 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E,
+ 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E,
+ 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E,
+ 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E,
+ 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E,
+ 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+ 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+ 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F,
+ 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F,
+ 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+ 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+ 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+ 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+ 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F,
+ 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F,
+ 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F,
+ 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F,
+ 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F,
+ 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F,
+ 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+ 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+ 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F,
+ 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+ 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F,
+ 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+ 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+ 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+ 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+ 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+ 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+ 0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F,
+ 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F,
+ 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F,
+ 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F,
+ 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F,
+ 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F,
+ 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F,
+ 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F,
+ 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F,
+ 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F,
+ 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+ 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F,
+ 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F,
+ 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F,
+ 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F,
+ 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F,
+ 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F,
+ 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F,
+ 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F,
+ 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20,
+ 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20,
+ 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20,
+ 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20,
+ 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20,
+ 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20,
+ 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20,
+ 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20,
+ 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20,
+ 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20,
+ 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20,
+ 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20,
+ 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20,
+ 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20,
+ 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20,
+ 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20,
+ 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20,
+ 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20,
+ 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20,
+ 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20,
+ 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20,
+ 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20,
+ 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20,
+ 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20,
+ 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20,
+ 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20,
+ 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20,
+ 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20,
+ 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20,
+ 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20,
+ 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20,
+ 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20,
+ 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20,
+ 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20,
+ 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20,
+ 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20,
+ 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20,
+ 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20,
+ 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20,
+ 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20,
+ 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20,
+ 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20,
+ 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20,
+ 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20,
+ 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20,
+ 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20,
+ 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20,
+ 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20,
+ 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20,
+ 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20,
+ 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20,
+ 0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20,
+ 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20,
+ 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20,
+ 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20,
+ 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20,
+ 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20,
+ 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20,
+ 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20,
+ 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20,
+ 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20,
+ 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20,
+ 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20,
+ 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20,
+ 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21,
+ 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21,
+ 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21,
+ 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21,
+ 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21,
+ 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21,
+ 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21,
+ 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21,
+ 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21,
+ 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21,
+ 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21,
+ 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21,
+ 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21,
+ 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21,
+ 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21,
+ 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21,
+ 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21,
+ 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21,
+ 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21,
+ 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21,
+ 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21,
+ 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21,
+ 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21,
+ 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21,
+ 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+ 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+ 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+ 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+ 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21,
+ 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24,
+ 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24,
+ 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24,
+ 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24,
+ 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24,
+ 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24,
+ 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24,
+ 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C,
+ 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C,
+ 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C,
+ 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C,
+ 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C,
+ 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C,
+ 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C,
+ 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C,
+ 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C,
+ 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C,
+ 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C,
+ 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C,
+ 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C,
+ 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C,
+ 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C,
+ 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C,
+ 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C,
+ 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C,
+ 0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C,
+ 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C,
+ 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C,
+ 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C,
+ 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C,
+ 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C,
+ 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C,
+ 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C,
+ 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C,
+ 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C,
+ 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C,
+ 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C,
+ 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C,
+ 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C,
+ 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C,
+ 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C,
+ 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C,
+ 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C,
+ 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C,
+ 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C,
+ 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C,
+ 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C,
+ 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C,
+ 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C,
+ 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C,
+ 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C,
+ 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C,
+ 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C,
+ 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C,
+ 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C,
+ 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C,
+ 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C,
+ 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C,
+ 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C,
+ 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10,
+ 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10,
+ 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10,
+ 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10,
+ 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10,
+ 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10,
+ 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10,
+ 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10,
+ 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10,
+ 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10,
+ 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF,
+ 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF,
+ 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF,
+ 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF,
+ 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF,
+ 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF,
+ 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF,
+ 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF,
+ 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF,
+ 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF,
+ 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF,
+ 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF,
+ 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF,
+ 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF,
+ 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF,
+ 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF,
+ 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF,
+ 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF,
+ 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF,
+ 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF,
+ 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF,
+ 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF,
+ 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF,
+ 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF,
+ 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF,
+ 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF,
+ 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF,
+ 0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF,
+ 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF,
+ 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF,
+ 0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF,
+ 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF,
+ 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF,
+ 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF,
+ 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF,
+ 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF,
+ 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF,
+ 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF,
+ 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF,
+ 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF,
+ 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF,
+ 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF,
+ 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF,
+ 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF,
+ 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF,
+ 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF,
+ 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF,
+ 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF
+};
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index 7ecfba4f2b06..240f9bb10b71 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -39,8 +39,7 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
}
/* Read the size of the page table. */
-static int gasket_read_page_table_size(
- struct gasket_dev *gasket_dev,
+static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
@@ -66,8 +65,7 @@ static int gasket_read_page_table_size(
}
/* Read the size of the simple page table. */
-static int gasket_read_simple_page_table_size(
- struct gasket_dev *gasket_dev,
+static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
@@ -93,8 +91,7 @@ static int gasket_read_simple_page_table_size(
}
/* Set the boundary between the simple and extended page tables. */
-static int gasket_partition_page_table(
- struct gasket_dev *gasket_dev,
+static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret;
@@ -185,8 +182,7 @@ static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
* Reserve structures for coherent allocation, and allocate or free the
* corresponding memory.
*/
-static int gasket_config_coherent_allocator(
- struct gasket_dev *gasket_dev,
+static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
struct gasket_coherent_alloc_config_ioctl __user *argp)
{
int ret;
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
index 806e75b7f405..3d2c6f88a138 100644
--- a/drivers/staging/greybus/Documentation/firmware/authenticate.c
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -2,54 +2,8 @@
/*
* Sample code to test CAP protocol
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
index 31d9c23e2eeb..765d69faa9cc 100644
--- a/drivers/staging/greybus/Documentation/firmware/firmware.c
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -2,54 +2,8 @@
/*
* Sample code to test firmware-management protocol
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index 4894c3514955..d4777f5a8b90 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -1,33 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-menuconfig GREYBUS
- tristate "Greybus support"
- depends on SYSFS
- ---help---
- This option enables the Greybus driver core. Greybus is an
- hardware protocol that was designed to provide Unipro with a
- sane application layer. It was originally designed for the
- ARA project, a module phone system, but has shown up in other
- phones, and can be tunneled over other busses in order to
- control hardware devices.
-
- Say Y here to enable support for these types of drivers.
-
- To compile this code as a module, chose M here: the module
- will be called greybus.ko
-
if GREYBUS
-config GREYBUS_ES2
- tristate "Greybus ES3 USB host controller"
- depends on USB
- ---help---
- Select this option if you have a Toshiba ES3 USB device that
- acts as a Greybus "host controller". This device is a bridge
- from a USB device to a Unipro network.
-
- To compile this code as a module, chose M here: the module
- will be called gb-es2.ko
-
config GREYBUS_AUDIO
tristate "Greybus Audio Class driver"
depends on SOUND
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
index 2551ed16b742..627e44f2a983 100644
--- a/drivers/staging/greybus/Makefile
+++ b/drivers/staging/greybus/Makefile
@@ -1,29 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-# Greybus core
-greybus-y := core.o \
- debugfs.o \
- hd.o \
- manifest.o \
- module.o \
- interface.o \
- bundle.o \
- connection.o \
- control.o \
- svc.o \
- svc_watchdog.o \
- operation.o
-
-obj-$(CONFIG_GREYBUS) += greybus.o
-
# needed for trace events
ccflags-y += -I$(src)
-
-# Greybus Host controller drivers
-gb-es2-y := es2.o
-
-obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
-
# Greybus class drivers
gb-bootrom-y := bootrom.o
gb-camera-y := camera.o
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 6eb842040c22..eebf0deb39f5 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -19,8 +19,8 @@
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/time.h>
+#include <linux/greybus.h>
#include "arche_platform.h"
-#include "greybus.h"
#if IS_ENABLED(CONFIG_USB_HSIC_USB3613)
#include <linux/usb/usb3613.h>
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
deleted file mode 100644
index 3dab6375909c..000000000000
--- a/drivers/staging/greybus/arpc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __ARPC_H
-#define __ARPC_H
-
-/* APBridgeA RPC (ARPC) */
-
-enum arpc_result {
- ARPC_SUCCESS = 0x00,
- ARPC_NO_MEMORY = 0x01,
- ARPC_INVALID = 0x02,
- ARPC_TIMEOUT = 0x03,
- ARPC_UNKNOWN_ERROR = 0xff,
-};
-
-struct arpc_request_message {
- __le16 id; /* RPC unique id */
- __le16 size; /* Size in bytes of header + payload */
- __u8 type; /* RPC type */
- __u8 data[0]; /* ARPC data */
-} __packed;
-
-struct arpc_response_message {
- __le16 id; /* RPC unique id */
- __u8 result; /* Result of RPC */
-} __packed;
-
-/* ARPC requests */
-#define ARPC_TYPE_CPORT_CONNECTED 0x01
-#define ARPC_TYPE_CPORT_QUIESCE 0x02
-#define ARPC_TYPE_CPORT_CLEAR 0x03
-#define ARPC_TYPE_CPORT_FLUSH 0x04
-#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
-
-struct arpc_cport_connected_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_quiesce_req {
- __le16 cport_id;
- __le16 peer_space;
- __le16 timeout;
-} __packed;
-
-struct arpc_cport_clear_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_flush_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_shutdown_req {
- __le16 cport_id;
- __le16 timeout;
- __u8 phase;
-} __packed;
-
-#endif /* __ARPC_H */
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
index 7ebb1bde5cb7..26117e390deb 100644
--- a/drivers/staging/greybus/audio_apbridgea.c
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -5,8 +5,7 @@
* Copyright 2015-2016 Google Inc.
*/
-#include "greybus.h"
-#include "greybus_protocols.h"
+#include <linux/greybus.h>
#include "audio_apbridgea.h"
#include "audio_codec.h"
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index 330fc7a397eb..3f1f4dd2c61a 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -1,30 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/**
+/*
* Copyright (c) 2015-2016 Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This is a special protocol for configuring communication over the
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index 9ba09ea9c2fc..cb5d271da1a5 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -8,12 +8,10 @@
#ifndef __LINUX_GBAUDIO_CODEC_H
#define __LINUX_GBAUDIO_CODEC_H
+#include <linux/greybus.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "greybus.h"
-#include "greybus_protocols.h"
-
#define NAME_SIZE 32
#define MAX_DAIS 2 /* APB1, APB2 */
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
index 8894f1c87d48..9d8994fdb41a 100644
--- a/drivers/staging/greybus/audio_gb.c
+++ b/drivers/staging/greybus/audio_gb.c
@@ -5,9 +5,7 @@
* Copyright 2015-2016 Google Inc.
*/
-#include "greybus.h"
-#include "greybus_protocols.h"
-#include "operation.h"
+#include <linux/greybus.h>
#include "audio_codec.h"
/* TODO: Split into separate calls */
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index a5d7c53df987..297e69f011c7 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -6,8 +6,7 @@
* Copyright 2016 Linaro Ltd.
*/
-#include "greybus.h"
-
+#include <linux/greybus.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 402e6360834f..a8efb86de140 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -10,8 +10,8 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "firmware.h"
/* Timeout, in jiffies, within which the next request must be received */
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
deleted file mode 100644
index 8734d2055657..000000000000
--- a/drivers/staging/greybus/bundle.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus bundles
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __BUNDLE_H
-#define __BUNDLE_H
-
-#include <linux/list.h>
-
-#define BUNDLE_ID_NONE U8_MAX
-
-/* Greybus "public" definitions" */
-struct gb_bundle {
- struct device dev;
- struct gb_interface *intf;
-
- u8 id;
- u8 class;
- u8 class_major;
- u8 class_minor;
-
- size_t num_cports;
- struct greybus_descriptor_cport *cport_desc;
-
- struct list_head connections;
- u8 *state;
-
- struct list_head links; /* interface->bundles */
-};
-#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
-
-/* Greybus "private" definitions" */
-struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
- u8 class);
-int gb_bundle_add(struct gb_bundle *bundle);
-void gb_bundle_destroy(struct gb_bundle *bundle);
-
-/* Bundle Runtime PM wrappers */
-#ifdef CONFIG_PM
-static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
-{
- int retval;
-
- retval = pm_runtime_get_sync(&bundle->dev);
- if (retval < 0) {
- dev_err(&bundle->dev,
- "pm_runtime_get_sync failed: %d\n", retval);
- pm_runtime_put_noidle(&bundle->dev);
- return retval;
- }
-
- return 0;
-}
-
-static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
-{
- int retval;
-
- pm_runtime_mark_last_busy(&bundle->dev);
- retval = pm_runtime_put_autosuspend(&bundle->dev);
-
- return retval;
-}
-
-static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
-{
- pm_runtime_get_noresume(&bundle->dev);
-}
-
-static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
-{
- pm_runtime_put_noidle(&bundle->dev);
-}
-
-#else
-static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
-{ return 0; }
-static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
-{ return 0; }
-
-static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
-static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
-#endif
-
-#endif /* __BUNDLE_H */
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 615c8e7fd51e..b570e13394ac 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -14,9 +14,9 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <linux/greybus.h>
#include "gb-camera.h"
-#include "greybus.h"
#include "greybus_protocols.h"
enum gb_camera_debugs_buffer_id {
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
deleted file mode 100644
index 5ca3befc0636..000000000000
--- a/drivers/staging/greybus/connection.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus connections
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __CONNECTION_H
-#define __CONNECTION_H
-
-#include <linux/list.h>
-#include <linux/kfifo.h>
-
-#define GB_CONNECTION_FLAG_CSD BIT(0)
-#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
-#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
-#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
-#define GB_CONNECTION_FLAG_CONTROL BIT(4)
-#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
-
-#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
-
-enum gb_connection_state {
- GB_CONNECTION_STATE_DISABLED = 0,
- GB_CONNECTION_STATE_ENABLED_TX = 1,
- GB_CONNECTION_STATE_ENABLED = 2,
- GB_CONNECTION_STATE_DISCONNECTING = 3,
-};
-
-struct gb_operation;
-
-typedef int (*gb_request_handler_t)(struct gb_operation *);
-
-struct gb_connection {
- struct gb_host_device *hd;
- struct gb_interface *intf;
- struct gb_bundle *bundle;
- struct kref kref;
- u16 hd_cport_id;
- u16 intf_cport_id;
-
- struct list_head hd_links;
- struct list_head bundle_links;
-
- gb_request_handler_t handler;
- unsigned long flags;
-
- struct mutex mutex;
- spinlock_t lock;
- enum gb_connection_state state;
- struct list_head operations;
-
- char name[16];
- struct workqueue_struct *wq;
-
- atomic_t op_cycle;
-
- void *private;
-
- bool mode_switch;
-};
-
-struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
- u16 hd_cport_id, gb_request_handler_t handler);
-struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
-struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
- u16 cport_id, gb_request_handler_t handler);
-struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
- u16 cport_id, gb_request_handler_t handler,
- unsigned long flags);
-struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
- u16 cport_id, unsigned long flags);
-void gb_connection_destroy(struct gb_connection *connection);
-
-static inline bool gb_connection_is_static(struct gb_connection *connection)
-{
- return !connection->intf;
-}
-
-int gb_connection_enable(struct gb_connection *connection);
-int gb_connection_enable_tx(struct gb_connection *connection);
-void gb_connection_disable_rx(struct gb_connection *connection);
-void gb_connection_disable(struct gb_connection *connection);
-void gb_connection_disable_forced(struct gb_connection *connection);
-
-void gb_connection_mode_switch_prepare(struct gb_connection *connection);
-void gb_connection_mode_switch_complete(struct gb_connection *connection);
-
-void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
- u8 *data, size_t length);
-
-void gb_connection_latency_tag_enable(struct gb_connection *connection);
-void gb_connection_latency_tag_disable(struct gb_connection *connection);
-
-static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
-{
- return !(connection->flags & GB_CONNECTION_FLAG_CSD);
-}
-
-static inline bool
-gb_connection_flow_control_disabled(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
-}
-
-static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
-}
-
-static inline bool gb_connection_is_control(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_CONTROL;
-}
-
-static inline void *gb_connection_get_data(struct gb_connection *connection)
-{
- return connection->private;
-}
-
-static inline void gb_connection_set_data(struct gb_connection *connection,
- void *data)
-{
- connection->private = data;
-}
-
-#endif /* __CONNECTION_H */
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
deleted file mode 100644
index 3a29ec05f631..000000000000
--- a/drivers/staging/greybus/control.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus CPort control protocol
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __CONTROL_H
-#define __CONTROL_H
-
-struct gb_control {
- struct device dev;
- struct gb_interface *intf;
-
- struct gb_connection *connection;
-
- u8 protocol_major;
- u8 protocol_minor;
-
- bool has_bundle_activate;
- bool has_bundle_version;
-
- char *vendor_string;
- char *product_string;
-};
-#define to_gb_control(d) container_of(d, struct gb_control, dev)
-
-struct gb_control *gb_control_create(struct gb_interface *intf);
-int gb_control_enable(struct gb_control *control);
-void gb_control_disable(struct gb_control *control);
-int gb_control_suspend(struct gb_control *control);
-int gb_control_resume(struct gb_control *control);
-int gb_control_add(struct gb_control *control);
-void gb_control_del(struct gb_control *control);
-struct gb_control *gb_control_get(struct gb_control *control);
-void gb_control_put(struct gb_control *control);
-
-int gb_control_get_bundle_versions(struct gb_control *control);
-int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
-int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
-int gb_control_disconnecting_operation(struct gb_control *control,
- u16 cport_id);
-int gb_control_mode_switch_operation(struct gb_control *control);
-void gb_control_mode_switch_prepare(struct gb_control *control);
-void gb_control_mode_switch_complete(struct gb_control *control);
-int gb_control_get_manifest_size_operation(struct gb_interface *intf);
-int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
- size_t size);
-int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
-int gb_control_interface_suspend_prepare(struct gb_control *control);
-int gb_control_interface_deactivate_prepare(struct gb_control *control);
-int gb_control_interface_hibernate_abort(struct gb_control *control);
-#endif /* __CONTROL_H */
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
index 946221307ef6..5d2564462ffc 100644
--- a/drivers/staging/greybus/firmware.h
+++ b/drivers/staging/greybus/firmware.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Firmware Management Header
*
@@ -9,7 +9,7 @@
#ifndef __FIRMWARE_H
#define __FIRMWARE_H
-#include "greybus.h"
+#include <linux/greybus.h>
#define FW_NAME_PREFIX "gmp_"
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
index 388866d92f5b..57bebf24636b 100644
--- a/drivers/staging/greybus/fw-core.c
+++ b/drivers/staging/greybus/fw-core.c
@@ -8,8 +8,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
+#include <linux/greybus.h>
#include "firmware.h"
-#include "greybus.h"
#include "spilib.h"
struct gb_fw_core {
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index d3b7cccbc10d..543692c567f9 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -10,8 +10,8 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
#include "firmware.h"
-#include "greybus.h"
/* Estimated minimum buffer size, actual size can be smaller than this */
#define MIN_FETCH_SIZE 512
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 71aec14f8181..687c6405c65b 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -13,10 +13,10 @@
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
+#include <linux/greybus.h>
#include "firmware.h"
#include "greybus_firmware.h"
-#include "greybus.h"
#define FW_MGMT_TIMEOUT_MS 1000
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index ee293e461fc3..5fc469101fc1 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Camera protocol driver.
*
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 6cb85c3d3572..9fc5c47be9bd 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -13,8 +13,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#define GB_GBPHY_AUTOSUSPEND_MS 3000
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
index 99463489d7d6..087928a586fb 100644
--- a/drivers/staging/greybus/gbphy.h
+++ b/drivers/staging/greybus/gbphy.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Bridged-Phy Bus driver
*
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 3151004d26fb..1ff34abd5692 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -13,8 +13,8 @@
#include <linux/irqdomain.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_gpio_line {
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
deleted file mode 100644
index d03ddb7c9df0..000000000000
--- a/drivers/staging/greybus/greybus.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus driver and device API
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#ifndef __LINUX_GREYBUS_H
-#define __LINUX_GREYBUS_H
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/idr.h>
-
-#include "greybus_id.h"
-#include "greybus_manifest.h"
-#include "greybus_protocols.h"
-#include "manifest.h"
-#include "hd.h"
-#include "svc.h"
-#include "control.h"
-#include "module.h"
-#include "interface.h"
-#include "bundle.h"
-#include "connection.h"
-#include "operation.h"
-
-/* Matches up with the Greybus Protocol specification document */
-#define GREYBUS_VERSION_MAJOR 0x00
-#define GREYBUS_VERSION_MINOR 0x01
-
-#define GREYBUS_ID_MATCH_DEVICE \
- (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
-
-#define GREYBUS_DEVICE(v, p) \
- .match_flags = GREYBUS_ID_MATCH_DEVICE, \
- .vendor = (v), \
- .product = (p),
-
-#define GREYBUS_DEVICE_CLASS(c) \
- .match_flags = GREYBUS_ID_MATCH_CLASS, \
- .class = (c),
-
-/* Maximum number of CPorts */
-#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
-#define CPORT_ID_BAD U16_MAX
-
-struct greybus_driver {
- const char *name;
-
- int (*probe)(struct gb_bundle *bundle,
- const struct greybus_bundle_id *id);
- void (*disconnect)(struct gb_bundle *bundle);
-
- const struct greybus_bundle_id *id_table;
-
- struct device_driver driver;
-};
-#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
-
-static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
-{
- dev_set_drvdata(&bundle->dev, data);
-}
-
-static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
-{
- return dev_get_drvdata(&bundle->dev);
-}
-
-/* Don't call these directly, use the module_greybus_driver() macro instead */
-int greybus_register_driver(struct greybus_driver *driver,
- struct module *module, const char *mod_name);
-void greybus_deregister_driver(struct greybus_driver *driver);
-
-/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
-#define greybus_register(driver) \
- greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
-#define greybus_deregister(driver) \
- greybus_deregister_driver(driver)
-
-/**
- * module_greybus_driver() - Helper macro for registering a Greybus driver
- * @__greybus_driver: greybus_driver structure
- *
- * Helper macro for Greybus drivers to set up proper module init / exit
- * functions. Replaces module_init() and module_exit() and keeps people from
- * printing pointless things to the kernel log when their driver is loaded.
- */
-#define module_greybus_driver(__greybus_driver) \
- module_driver(__greybus_driver, greybus_register, greybus_deregister)
-
-int greybus_disabled(void);
-
-void gb_debugfs_init(void);
-void gb_debugfs_cleanup(void);
-struct dentry *gb_debugfs_get(void);
-
-extern struct bus_type greybus_bus_type;
-
-extern struct device_type greybus_hd_type;
-extern struct device_type greybus_module_type;
-extern struct device_type greybus_interface_type;
-extern struct device_type greybus_control_type;
-extern struct device_type greybus_bundle_type;
-extern struct device_type greybus_svc_type;
-
-static inline int is_gb_host_device(const struct device *dev)
-{
- return dev->type == &greybus_hd_type;
-}
-
-static inline int is_gb_module(const struct device *dev)
-{
- return dev->type == &greybus_module_type;
-}
-
-static inline int is_gb_interface(const struct device *dev)
-{
- return dev->type == &greybus_interface_type;
-}
-
-static inline int is_gb_control(const struct device *dev)
-{
- return dev->type == &greybus_control_type;
-}
-
-static inline int is_gb_bundle(const struct device *dev)
-{
- return dev->type == &greybus_bundle_type;
-}
-
-static inline int is_gb_svc(const struct device *dev)
-{
- return dev->type == &greybus_svc_type;
-}
-
-static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
-{
- return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
-}
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_GREYBUS_H */
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 03ea9615b217..7edc7295b7ab 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -1,55 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Greybus Component Authentication User Header
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_AUTHENTICATION_USER_H
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index b58281a63ba4..f68fd5e25321 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -1,55 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Greybus Firmware Management User Header
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_FIRMWARE_USER_H
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
deleted file mode 100644
index f4c8440093e4..000000000000
--- a/drivers/staging/greybus/greybus_id.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* FIXME
- * move this to include/linux/mod_devicetable.h when merging
- */
-
-#ifndef __LINUX_GREYBUS_ID_H
-#define __LINUX_GREYBUS_ID_H
-
-#include <linux/types.h>
-#include <linux/mod_devicetable.h>
-
-
-struct greybus_bundle_id {
- __u16 match_flags;
- __u32 vendor;
- __u32 product;
- __u8 class;
-
- kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
-};
-
-/* Used to match the greybus_bundle_id */
-#define GREYBUS_ID_MATCH_VENDOR BIT(0)
-#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
-#define GREYBUS_ID_MATCH_CLASS BIT(2)
-
-#endif /* __LINUX_GREYBUS_ID_H */
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
deleted file mode 100644
index 2cec5cf7a846..000000000000
--- a/drivers/staging/greybus/greybus_manifest.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus manifest definition
- *
- * See "Greybus Application Protocol" document (version 0.1) for
- * details on these values and structures.
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 and BSD licenses.
- */
-
-#ifndef __GREYBUS_MANIFEST_H
-#define __GREYBUS_MANIFEST_H
-
-enum greybus_descriptor_type {
- GREYBUS_TYPE_INVALID = 0x00,
- GREYBUS_TYPE_INTERFACE = 0x01,
- GREYBUS_TYPE_STRING = 0x02,
- GREYBUS_TYPE_BUNDLE = 0x03,
- GREYBUS_TYPE_CPORT = 0x04,
-};
-
-enum greybus_protocol {
- GREYBUS_PROTOCOL_CONTROL = 0x00,
- /* 0x01 is unused */
- GREYBUS_PROTOCOL_GPIO = 0x02,
- GREYBUS_PROTOCOL_I2C = 0x03,
- GREYBUS_PROTOCOL_UART = 0x04,
- GREYBUS_PROTOCOL_HID = 0x05,
- GREYBUS_PROTOCOL_USB = 0x06,
- GREYBUS_PROTOCOL_SDIO = 0x07,
- GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
- GREYBUS_PROTOCOL_PWM = 0x09,
- /* 0x0a is unused */
- GREYBUS_PROTOCOL_SPI = 0x0b,
- GREYBUS_PROTOCOL_DISPLAY = 0x0c,
- GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
- GREYBUS_PROTOCOL_SENSOR = 0x0e,
- GREYBUS_PROTOCOL_LIGHTS = 0x0f,
- GREYBUS_PROTOCOL_VIBRATOR = 0x10,
- GREYBUS_PROTOCOL_LOOPBACK = 0x11,
- GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
- GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
- GREYBUS_PROTOCOL_SVC = 0x14,
- GREYBUS_PROTOCOL_BOOTROM = 0x15,
- GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
- GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
- GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
- GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
- GREYBUS_PROTOCOL_LOG = 0x1a,
- /* ... */
- GREYBUS_PROTOCOL_RAW = 0xfe,
- GREYBUS_PROTOCOL_VENDOR = 0xff,
-};
-
-enum greybus_class_type {
- GREYBUS_CLASS_CONTROL = 0x00,
- /* 0x01 is unused */
- /* 0x02 is unused */
- /* 0x03 is unused */
- /* 0x04 is unused */
- GREYBUS_CLASS_HID = 0x05,
- /* 0x06 is unused */
- /* 0x07 is unused */
- GREYBUS_CLASS_POWER_SUPPLY = 0x08,
- /* 0x09 is unused */
- GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
- /* 0x0b is unused */
- GREYBUS_CLASS_DISPLAY = 0x0c,
- GREYBUS_CLASS_CAMERA = 0x0d,
- GREYBUS_CLASS_SENSOR = 0x0e,
- GREYBUS_CLASS_LIGHTS = 0x0f,
- GREYBUS_CLASS_VIBRATOR = 0x10,
- GREYBUS_CLASS_LOOPBACK = 0x11,
- GREYBUS_CLASS_AUDIO = 0x12,
- /* 0x13 is unused */
- /* 0x14 is unused */
- GREYBUS_CLASS_BOOTROM = 0x15,
- GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
- GREYBUS_CLASS_LOG = 0x17,
- /* ... */
- GREYBUS_CLASS_RAW = 0xfe,
- GREYBUS_CLASS_VENDOR = 0xff,
-};
-
-enum {
- GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
-};
-
-/*
- * The string in a string descriptor is not NUL-terminated. The
- * size of the descriptor will be rounded up to a multiple of 4
- * bytes, by padding the string with 0x00 bytes if necessary.
- */
-struct greybus_descriptor_string {
- __u8 length;
- __u8 id;
- __u8 string[0];
-} __packed;
-
-/*
- * An interface descriptor describes information about an interface as a whole,
- * *not* the functions within it.
- */
-struct greybus_descriptor_interface {
- __u8 vendor_stringid;
- __u8 product_stringid;
- __u8 features;
- __u8 pad;
-} __packed;
-
-/*
- * A bundle descriptor defines an identification number and a class for
- * each bundle.
- *
- * @id: Uniquely identifies a bundle within an interface; its sole purpose is
- * to allow CPort descriptors to specify which bundle they are associated
- * with. The first bundle will have id 0, the second id 1, and so on.
- *
- * The largest CPort id associated with a bundle (defined by a
- * CPort descriptor in the manifest) is used to determine how to
- * encode the device id and module number in UniPro packets
- * that use the bundle.
- *
- * @class: Used by the kernel to know the functionality provided by the
- * bundle; it is matched against a driver's advertised functionality when
- * probing the greybus driver. It should contain one of the values defined
- * in 'enum greybus_class_type'.
- *
- */
-struct greybus_descriptor_bundle {
- __u8 id; /* interface-relative id (0..) */
- __u8 class;
- __u8 pad[2];
-} __packed;
-
-/*
- * A CPort descriptor indicates the id of the bundle within the
- * module it's associated with, along with the CPort id used to
- * address the CPort. The protocol id defines the format of messages
- * exchanged using the CPort.
- */
-struct greybus_descriptor_cport {
- __le16 id;
- __u8 bundle;
- __u8 protocol_id; /* enum greybus_protocol */
-} __packed;
-
-struct greybus_descriptor_header {
- __le16 size;
- __u8 type; /* enum greybus_descriptor_type */
- __u8 pad;
-} __packed;
-
-struct greybus_descriptor {
- struct greybus_descriptor_header header;
- union {
- struct greybus_descriptor_string string;
- struct greybus_descriptor_interface interface;
- struct greybus_descriptor_bundle bundle;
- struct greybus_descriptor_cport cport;
- };
-} __packed;
-
-struct greybus_manifest_header {
- __le16 size;
- __u8 version_major;
- __u8 version_minor;
-} __packed;
-
-struct greybus_manifest {
- struct greybus_manifest_header header;
- struct greybus_descriptor descriptors[0];
-} __packed;
-
-#endif /* __GREYBUS_MANIFEST_H */
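The manifest layout above is a greybus_manifest_header followed by packed descriptors, each starting with a greybus_descriptor_header whose size field covers the whole descriptor, so it lends itself to a simple linear walk. A hedged sketch only, not code from this patch: the function name is invented, and it assumes <linux/types.h>, <linux/errno.h> and <asm/byteorder.h>.

/* Illustrative only: count the descriptors in a manifest blob of 'len' bytes. */
static int gb_example_count_descriptors(const void *data, size_t len)
{
	const struct greybus_manifest *manifest = data;
	size_t size = le16_to_cpu(manifest->header.size);
	const u8 *p = (const u8 *)manifest->descriptors;
	const u8 *end = (const u8 *)data + size;
	int count = 0;

	if (len < sizeof(manifest->header) || size > len ||
	    size < sizeof(manifest->header))
		return -EINVAL;

	while (p < end) {
		const struct greybus_descriptor_header *hdr = (const void *)p;
		size_t desc_size = le16_to_cpu(hdr->size);

		/* each descriptor begins with a header whose size covers it */
		if (desc_size < sizeof(*hdr) || desc_size > (size_t)(end - p))
			return -EINVAL;

		p += desc_size;
		count++;
	}

	return count;
}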
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
deleted file mode 100644
index ddc73f10eb22..000000000000
--- a/drivers/staging/greybus/greybus_protocols.h
+++ /dev/null
@@ -1,2222 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
- * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
- * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __GREYBUS_PROTOCOLS_H
-#define __GREYBUS_PROTOCOLS_H
-
-/* Fixed IDs for control/svc protocols */
-
-/* SVC switch-port device ids */
-#define GB_SVC_DEVICE_ID_SVC 0
-#define GB_SVC_DEVICE_ID_AP 1
-#define GB_SVC_DEVICE_ID_MIN 2
-#define GB_SVC_DEVICE_ID_MAX 31
-
-#define GB_SVC_CPORT_ID 0
-#define GB_CONTROL_BUNDLE_ID 0
-#define GB_CONTROL_CPORT_ID 0
-
-
-/*
- * All operation messages (both requests and responses) begin with
- * a header that encodes the size of the message (header included).
- * This header also contains a unique identifier, that associates a
- * response message with its operation. The header contains an
- * operation type field, whose interpretation is dependent on what
- * type of protocol is used over the connection. The high bit
- * (0x80) of the operation type field is used to indicate whether
- * the message is a request (clear) or a response (set).
- *
- * Response messages include an additional result byte, which
- * communicates the result of the corresponding request. A zero
- * result value means the operation completed successfully. Any
- * other value indicates an error; in this case, the payload of the
- * response message (if any) is ignored. The result byte must be
- * zero in the header for a request message.
- *
- * The wire format for all numeric fields in the header is little
- * endian. Any operation-specific data begins immediately after the
- * header.
- */
-struct gb_operation_msg_hdr {
- __le16 size; /* Size in bytes of header + payload */
- __le16 operation_id; /* Operation unique id */
- __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
- __u8 result; /* Result of request (in responses only) */
- __u8 pad[2]; /* must be zero (ignore when read) */
-} __packed;
-
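For readers following the header layout described in the comment above, a minimal sketch of filling in a request header and recognising a response. Both helper names and the GB_EXAMPLE_* macro are hypothetical, chosen only for illustration; the sketch assumes <linux/types.h> and <asm/byteorder.h>.

#define GB_EXAMPLE_TYPE_RESPONSE	0x80	/* high bit of the type field */

/* Illustrative only: prepare a request header for a payload of the given size. */
static void gb_example_init_request(struct gb_operation_msg_hdr *hdr,
				    u16 operation_id, u8 type,
				    size_t payload_size)
{
	hdr->size = cpu_to_le16(sizeof(*hdr) + payload_size);
	hdr->operation_id = cpu_to_le16(operation_id);
	hdr->type = type;	/* request: high bit clear */
	hdr->result = 0;	/* must be zero in a request */
	hdr->pad[0] = 0;
	hdr->pad[1] = 0;
}

/* Illustrative only: responses have the high bit of the type field set. */
static bool gb_example_is_response(const struct gb_operation_msg_hdr *hdr)
{
	return hdr->type & GB_EXAMPLE_TYPE_RESPONSE;
}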
-
-/* Generic request types */
-#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
-#define GB_REQUEST_TYPE_INVALID 0x7f
-
-struct gb_cport_shutdown_request {
- __u8 phase;
-} __packed;
-
-
-/* Control Protocol */
-
-/* Greybus control request types */
-#define GB_CONTROL_TYPE_VERSION 0x01
-#define GB_CONTROL_TYPE_PROBE_AP 0x02
-#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
-#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
-#define GB_CONTROL_TYPE_CONNECTED 0x05
-#define GB_CONTROL_TYPE_DISCONNECTED 0x06
-#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
-#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
-#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
-/* Unused 0x0a */
-#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
-#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
-#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
-#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
-#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
-#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
-#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
-#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
-#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
-#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
-#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
-
-struct gb_control_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_control_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_control_bundle_version_request {
- __u8 bundle_id;
-} __packed;
-
-struct gb_control_bundle_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* Control protocol manifest get size request has no payload */
-struct gb_control_get_manifest_size_response {
- __le16 size;
-} __packed;
-
-/* Control protocol manifest get request has no payload */
-struct gb_control_get_manifest_response {
- __u8 data[0];
-} __packed;
-
-/* Control protocol [dis]connected request */
-struct gb_control_connected_request {
- __le16 cport_id;
-} __packed;
-
-struct gb_control_disconnecting_request {
- __le16 cport_id;
-} __packed;
-/* disconnecting response has no payload */
-
-struct gb_control_disconnected_request {
- __le16 cport_id;
-} __packed;
-/* Control protocol [dis]connected response has no payload */
-
-/*
- * All Bundle power management operations use the same request and response
- * layout and status codes.
- */
-
-#define GB_CONTROL_BUNDLE_PM_OK 0x00
-#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
-#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
-#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
-#define GB_CONTROL_BUNDLE_PM_NA 0x04
-
-struct gb_control_bundle_pm_request {
- __u8 bundle_id;
-} __packed;
-
-struct gb_control_bundle_pm_response {
- __u8 status;
-} __packed;
-
-/*
- * Interface Suspend Prepare and Deactivate Prepare operations use the same
- * response layout and error codes. Define a single response structure and reuse
- * it. Both operations have no payload.
- */
-
-#define GB_CONTROL_INTF_PM_OK 0x00
-#define GB_CONTROL_INTF_PM_BUSY 0x01
-#define GB_CONTROL_INTF_PM_NA 0x02
-
-struct gb_control_intf_pm_response {
- __u8 status;
-} __packed;
-
-/* APBridge protocol */
-
-/* request APB1 log */
-#define GB_APB_REQUEST_LOG 0x02
-
-/* request to map a cport to bulk in and bulk out endpoints */
-#define GB_APB_REQUEST_EP_MAPPING 0x03
-
-/* request to get the number of cports available */
-#define GB_APB_REQUEST_CPORT_COUNT 0x04
-
-/* request to reset a cport state */
-#define GB_APB_REQUEST_RESET_CPORT 0x05
-
-/* request to time the latency of messages on a given cport */
-#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
-#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
-
-/* request to control the CSI transmitter */
-#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
-
-/* request to control audio streaming */
-#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
-
-/* TimeSync requests */
-#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
-#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
-#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
-#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
-
-/* requests to set Greybus CPort flags */
-#define GB_APB_REQUEST_CPORT_FLAGS 0x11
-
-/* ARPC request */
-#define GB_APB_REQUEST_ARPC_RUN 0x12
-
-struct gb_apb_request_cport_flags {
- __le32 flags;
-#define GB_APB_CPORT_FLAG_CONTROL 0x01
-#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
-} __packed;
-
-
-/* Firmware Download Protocol */
-
-/* Request Types */
-#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
-#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
-#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
-
-#define GB_FIRMWARE_TAG_MAX_SIZE 10
-
-/* firmware download find firmware request/response */
-struct gb_fw_download_find_firmware_request {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-
-struct gb_fw_download_find_firmware_response {
- __u8 firmware_id;
- __le32 size;
-} __packed;
-
-/* firmware download fetch firmware request/response */
-struct gb_fw_download_fetch_firmware_request {
- __u8 firmware_id;
- __le32 offset;
- __le32 size;
-} __packed;
-
-struct gb_fw_download_fetch_firmware_response {
- __u8 data[0];
-} __packed;
-
-/* firmware download release firmware request */
-struct gb_fw_download_release_firmware_request {
- __u8 firmware_id;
-} __packed;
-/* firmware download release firmware response has no payload */
-
-
-/* Firmware Management Protocol */
-
-/* Request Types */
-#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
-#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
-#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
-#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
-#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
-#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
-
-#define GB_FW_LOAD_METHOD_UNIPRO 0x01
-#define GB_FW_LOAD_METHOD_INTERNAL 0x02
-
-#define GB_FW_LOAD_STATUS_FAILED 0x00
-#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
-#define GB_FW_LOAD_STATUS_VALIDATED 0x02
-#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
-
-#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
-#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
-#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
-#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
-#define GB_FW_BACKEND_FW_STATUS_INT 0x05
-#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
-#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
-
-#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
-#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
-#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
-#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
-#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
-
-/* firmware management interface firmware version request has no payload */
-struct gb_fw_mgmt_interface_fw_version_response {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
- __le16 major;
- __le16 minor;
-} __packed;
-
-/* firmware management load and validate firmware request/response */
-struct gb_fw_mgmt_load_and_validate_fw_request {
- __u8 request_id;
- __u8 load_method;
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-/* firmware management load and validate firmware response has no payload */
-
-/* firmware management loaded firmware request */
-struct gb_fw_mgmt_loaded_fw_request {
- __u8 request_id;
- __u8 status;
- __le16 major;
- __le16 minor;
-} __packed;
-/* firmware management loaded firmware response has no payload */
-
-/* firmware management backend firmware version request/response */
-struct gb_fw_mgmt_backend_fw_version_request {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-
-struct gb_fw_mgmt_backend_fw_version_response {
- __le16 major;
- __le16 minor;
- __u8 status;
-} __packed;
-
-/* firmware management backend firmware update request */
-struct gb_fw_mgmt_backend_fw_update_request {
- __u8 request_id;
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-/* firmware management backend firmware update response has no payload */
-
-/* firmware management backend firmware updated request */
-struct gb_fw_mgmt_backend_fw_updated_request {
- __u8 request_id;
- __u8 status;
-} __packed;
-/* firmware management backend firmware updated response has no payload */
-
-
-/* Component Authentication Protocol (CAP) */
-
-/* Request Types */
-#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
-#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
-#define GB_CAP_TYPE_AUTHENTICATE 0x03
-
-/* CAP get endpoint uid request has no payload */
-struct gb_cap_get_endpoint_uid_response {
- __u8 uid[8];
-} __packed;
-
-/* CAP get endpoint ims certificate request/response */
-struct gb_cap_get_ims_certificate_request {
- __le32 certificate_class;
- __le32 certificate_id;
-} __packed;
-
-struct gb_cap_get_ims_certificate_response {
- __u8 result_code;
- __u8 certificate[0];
-} __packed;
-
-/* CAP authenticate request/response */
-struct gb_cap_authenticate_request {
- __le32 auth_type;
- __u8 uid[8];
- __u8 challenge[32];
-} __packed;
-
-struct gb_cap_authenticate_response {
- __u8 result_code;
- __u8 response[64];
- __u8 signature[0];
-} __packed;
-
-
-/* Bootrom Protocol */
-
-/* Version of the Greybus bootrom protocol we support */
-#define GB_BOOTROM_VERSION_MAJOR 0x00
-#define GB_BOOTROM_VERSION_MINOR 0x01
-
-/* Greybus bootrom request types */
-#define GB_BOOTROM_TYPE_VERSION 0x01
-#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
-#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
-#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
-#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */
-#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */
-
-/* Greybus bootrom boot stages */
-#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
-#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
-#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
-
-/* Greybus bootrom ready to boot status */
-#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
-#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
-#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
-
-/* Max bootrom data fetch size in bytes */
-#define GB_BOOTROM_FETCH_MAX 2000
-
-struct gb_bootrom_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_bootrom_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* Bootrom protocol firmware size request/response */
-struct gb_bootrom_firmware_size_request {
- __u8 stage;
-} __packed;
-
-struct gb_bootrom_firmware_size_response {
- __le32 size;
-} __packed;
-
-/* Bootrom protocol get firmware request/response */
-struct gb_bootrom_get_firmware_request {
- __le32 offset;
- __le32 size;
-} __packed;
-
-struct gb_bootrom_get_firmware_response {
- __u8 data[0];
-} __packed;
-
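Because the get firmware request carries an offset and a size, and fetches are capped at GB_BOOTROM_FETCH_MAX bytes, a stage image is pulled down in chunks. A sketch under stated assumptions: gb_example_send_fetch() stands in for whatever actually issues the operation and is not a real Greybus function; it assumes <linux/types.h>, <linux/kernel.h> and <asm/byteorder.h>.

/* Illustrative only: hypothetical hook that sends one GET_FIRMWARE request. */
static int gb_example_send_fetch(const struct gb_bootrom_get_firmware_request *req);

/* Illustrative only: fetch a firmware image in GB_BOOTROM_FETCH_MAX chunks. */
static int gb_example_download_firmware(u32 total_size)
{
	u32 offset = 0;

	while (offset < total_size) {
		u32 chunk = min_t(u32, total_size - offset,
				  GB_BOOTROM_FETCH_MAX);
		struct gb_bootrom_get_firmware_request req = {
			.offset = cpu_to_le32(offset),
			.size	= cpu_to_le32(chunk),
		};
		int ret = gb_example_send_fetch(&req);

		if (ret)
			return ret;

		offset += chunk;
	}

	return 0;
}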
-/* Bootrom protocol Ready to boot request */
-struct gb_bootrom_ready_to_boot_request {
- __u8 status;
-} __packed;
-/* Bootrom protocol Ready to boot response has no payload */
-
-/* Bootrom protocol get VID/PID request has no payload */
-struct gb_bootrom_get_vid_pid_response {
- __le32 vendor_id;
- __le32 product_id;
-} __packed;
-
-
-/* Power Supply */
-
-/* Greybus power supply request types */
-#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
-#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
-#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
-#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
-#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
-#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
-
-/* Greybus power supply battery technologies types */
-#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
-#define GB_POWER_SUPPLY_TECH_LION 0x0002
-#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
-#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
-#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
-#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
-
-/* Greybus power supply types */
-#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
-#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
-#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
-#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
-#define GB_POWER_SUPPLY_USB_TYPE 0x0004
-#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
-#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
-#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
-
-/* Greybus power supply health values */
-#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
-#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
-#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
-#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
-#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
-#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
-#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
-#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
-
-/* Greybus power supply status values */
-#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
-#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
-#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
-#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
-
-/* Greybus power supply capacity level values */
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
-
-/* Greybus power supply scope values */
-#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
-#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
-
-struct gb_power_supply_get_supplies_response {
- __u8 supplies_count;
-} __packed;
-
-struct gb_power_supply_get_description_request {
- __u8 psy_id;
-} __packed;
-
-struct gb_power_supply_get_description_response {
- __u8 manufacturer[32];
- __u8 model[32];
- __u8 serial_number[32];
- __le16 type;
- __u8 properties_count;
-} __packed;
-
-struct gb_power_supply_props_desc {
- __u8 property;
-#define GB_POWER_SUPPLY_PROP_STATUS 0x00
-#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
-#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
-#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
-#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
-#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
-#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
-#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
-#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
-#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
-#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
-#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
-#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
-#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
-#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
-#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
-#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
-#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
-#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
-#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
-#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
-#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
-#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
-#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
-#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
-#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
-#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
-#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
-#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
-#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
-#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
-#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
-#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
-#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
-#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
-#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
-#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
-#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
-#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
-#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
-#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
-#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
-#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
-#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
-#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
-#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
-#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
- __u8 is_writeable;
-} __packed;
-
-struct gb_power_supply_get_property_descriptors_request {
- __u8 psy_id;
-} __packed;
-
-struct gb_power_supply_get_property_descriptors_response {
- __u8 properties_count;
- struct gb_power_supply_props_desc props[];
-} __packed;
-
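The property descriptors response is a count followed by a flexible array of gb_power_supply_props_desc entries, so a consumer scans it linearly. A minimal sketch, assuming the response has already been received and length-checked; the function name is invented for this example and requires <linux/types.h>.

/* Illustrative only: report whether a given property was advertised writeable. */
static bool gb_example_prop_is_writeable(
		const struct gb_power_supply_get_property_descriptors_response *resp,
		u8 property)
{
	unsigned int i;

	for (i = 0; i < resp->properties_count; i++)
		if (resp->props[i].property == property)
			return resp->props[i].is_writeable;

	return false;
}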
-struct gb_power_supply_get_property_request {
- __u8 psy_id;
- __u8 property;
-} __packed;
-
-struct gb_power_supply_get_property_response {
- __le32 prop_val;
-};
-
-struct gb_power_supply_set_property_request {
- __u8 psy_id;
- __u8 property;
- __le32 prop_val;
-} __packed;
-
-struct gb_power_supply_event_request {
- __u8 psy_id;
- __u8 event;
-#define GB_POWER_SUPPLY_UPDATE 0x01
-} __packed;
-
-
-/* HID */
-
-/* Greybus HID operation types */
-#define GB_HID_TYPE_GET_DESC 0x02
-#define GB_HID_TYPE_GET_REPORT_DESC 0x03
-#define GB_HID_TYPE_PWR_ON 0x04
-#define GB_HID_TYPE_PWR_OFF 0x05
-#define GB_HID_TYPE_GET_REPORT 0x06
-#define GB_HID_TYPE_SET_REPORT 0x07
-#define GB_HID_TYPE_IRQ_EVENT 0x08
-
-/* Report type */
-#define GB_HID_INPUT_REPORT 0
-#define GB_HID_OUTPUT_REPORT 1
-#define GB_HID_FEATURE_REPORT 2
-
-/* Different request/response structures */
-/* HID get descriptor response */
-struct gb_hid_desc_response {
- __u8 bLength;
- __le16 wReportDescLength;
- __le16 bcdHID;
- __le16 wProductID;
- __le16 wVendorID;
- __u8 bCountryCode;
-} __packed;
-
-/* HID get report request/response */
-struct gb_hid_get_report_request {
- __u8 report_type;
- __u8 report_id;
-} __packed;
-
-/* HID set report request */
-struct gb_hid_set_report_request {
- __u8 report_type;
- __u8 report_id;
- __u8 report[0];
-} __packed;
-
-/* HID input report request, via interrupt pipe */
-struct gb_hid_input_report_request {
- __u8 report[0];
-} __packed;
-
-
-/* I2C */
-
-/* Greybus i2c request types */
-#define GB_I2C_TYPE_FUNCTIONALITY 0x02
-#define GB_I2C_TYPE_TRANSFER 0x05
-
-/* functionality request has no payload */
-struct gb_i2c_functionality_response {
- __le32 functionality;
-} __packed;
-
-/*
- * Outgoing data immediately follows the op count and ops array.
- * The data for each write (master -> slave) op in the array is sent
- * in order, with no (e.g. pad) bytes separating them.
- *
- * Short reads cause the entire transfer request to fail, so the response
- * payload consists only of the bytes read, and the number of bytes is
- * exactly what was specified in the corresponding op. Like the
- * outgoing data, the incoming data is in order and contiguous.
- */
-struct gb_i2c_transfer_op {
- __le16 addr;
- __le16 flags;
- __le16 size;
-} __packed;
-
-struct gb_i2c_transfer_request {
- __le16 op_count;
- struct gb_i2c_transfer_op ops[0]; /* op_count of these */
-} __packed;
-struct gb_i2c_transfer_response {
- __u8 data[0]; /* inbound data */
-} __packed;
-
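The comment above describes the wire layout of a transfer: the op descriptors come first, and the outgoing write data follows them contiguously. A hedged sketch of packing one write followed by one read; the buffer is assumed to be sized as sizeof(*req) + 2 * sizeof(struct gb_i2c_transfer_op) + wlen, the 0x0001 read flag is assumed to mirror I2C_M_RD from <linux/i2c.h>, and the helper name is invented (it also needs <linux/types.h>, <linux/string.h> and <asm/byteorder.h>).

/* Illustrative only: fill a transfer request with one write op and one read op. */
static void gb_example_fill_write_read(struct gb_i2c_transfer_request *req,
				       u16 addr, const u8 *wbuf, u16 wlen,
				       u16 rlen)
{
	u8 *data;

	req->op_count = cpu_to_le16(2);

	req->ops[0].addr = cpu_to_le16(addr);
	req->ops[0].flags = cpu_to_le16(0);		/* write */
	req->ops[0].size = cpu_to_le16(wlen);

	req->ops[1].addr = cpu_to_le16(addr);
	req->ops[1].flags = cpu_to_le16(0x0001);	/* read (assumed I2C_M_RD) */
	req->ops[1].size = cpu_to_le16(rlen);

	/* outgoing write data immediately follows the ops array */
	data = (u8 *)&req->ops[2];
	memcpy(data, wbuf, wlen);
}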
-
-/* GPIO */
-
-/* Greybus GPIO request types */
-#define GB_GPIO_TYPE_LINE_COUNT 0x02
-#define GB_GPIO_TYPE_ACTIVATE 0x03
-#define GB_GPIO_TYPE_DEACTIVATE 0x04
-#define GB_GPIO_TYPE_GET_DIRECTION 0x05
-#define GB_GPIO_TYPE_DIRECTION_IN 0x06
-#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
-#define GB_GPIO_TYPE_GET_VALUE 0x08
-#define GB_GPIO_TYPE_SET_VALUE 0x09
-#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
-#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
-#define GB_GPIO_TYPE_IRQ_MASK 0x0c
-#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
-#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
-
-#define GB_GPIO_IRQ_TYPE_NONE 0x00
-#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
-#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
-#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
-#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
-#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
-
-/* line count request has no payload */
-struct gb_gpio_line_count_response {
- __u8 count;
-} __packed;
-
-struct gb_gpio_activate_request {
- __u8 which;
-} __packed;
-/* activate response has no payload */
-
-struct gb_gpio_deactivate_request {
- __u8 which;
-} __packed;
-/* deactivate response has no payload */
-
-struct gb_gpio_get_direction_request {
- __u8 which;
-} __packed;
-struct gb_gpio_get_direction_response {
- __u8 direction;
-} __packed;
-
-struct gb_gpio_direction_in_request {
- __u8 which;
-} __packed;
-/* direction in response has no payload */
-
-struct gb_gpio_direction_out_request {
- __u8 which;
- __u8 value;
-} __packed;
-/* direction out response has no payload */
-
-struct gb_gpio_get_value_request {
- __u8 which;
-} __packed;
-struct gb_gpio_get_value_response {
- __u8 value;
-} __packed;
-
-struct gb_gpio_set_value_request {
- __u8 which;
- __u8 value;
-} __packed;
-/* set value response has no payload */
-
-struct gb_gpio_set_debounce_request {
- __u8 which;
- __le16 usec;
-} __packed;
-/* debounce response has no payload */
-
-struct gb_gpio_irq_type_request {
- __u8 which;
- __u8 type;
-} __packed;
-/* irq type response has no payload */
-
-struct gb_gpio_irq_mask_request {
- __u8 which;
-} __packed;
-/* irq mask response has no payload */
-
-struct gb_gpio_irq_unmask_request {
- __u8 which;
-} __packed;
-/* irq unmask response has no payload */
-
-/* irq event requests originate on another module and are handled on the AP */
-struct gb_gpio_irq_event_request {
- __u8 which;
-} __packed;
-/* irq event has no response */
-
-
-/* PWM */
-
-/* Greybus PWM operation types */
-#define GB_PWM_TYPE_PWM_COUNT 0x02
-#define GB_PWM_TYPE_ACTIVATE 0x03
-#define GB_PWM_TYPE_DEACTIVATE 0x04
-#define GB_PWM_TYPE_CONFIG 0x05
-#define GB_PWM_TYPE_POLARITY 0x06
-#define GB_PWM_TYPE_ENABLE 0x07
-#define GB_PWM_TYPE_DISABLE 0x08
-
-/* pwm count request has no payload */
-struct gb_pwm_count_response {
- __u8 count;
-} __packed;
-
-struct gb_pwm_activate_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_deactivate_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_config_request {
- __u8 which;
- __le32 duty;
- __le32 period;
-} __packed;
-
-struct gb_pwm_polarity_request {
- __u8 which;
- __u8 polarity;
-} __packed;
-
-struct gb_pwm_enable_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_disable_request {
- __u8 which;
-} __packed;
-
-/* SPI */
-
-/* Should match up with modes in linux/spi/spi.h */
-#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
-#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
-#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */
-#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA)
-#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0)
-#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA)
-#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
-#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
-#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
-#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
-
-/* Should match up with flags in linux/spi/spi.h */
-#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
-#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
-
-/* Greybus spi operation types */
-#define GB_SPI_TYPE_MASTER_CONFIG 0x02
-#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
-#define GB_SPI_TYPE_TRANSFER 0x04
-
-/* mode request has no payload */
-struct gb_spi_master_config_response {
- __le32 bits_per_word_mask;
- __le32 min_speed_hz;
- __le32 max_speed_hz;
- __le16 mode;
- __le16 flags;
- __u8 num_chipselect;
-} __packed;
-
-struct gb_spi_device_config_request {
- __u8 chip_select;
-} __packed;
-
-struct gb_spi_device_config_response {
- __le16 mode;
- __u8 bits_per_word;
- __le32 max_speed_hz;
- __u8 device_type;
-#define GB_SPI_SPI_DEV 0x00
-#define GB_SPI_SPI_NOR 0x01
-#define GB_SPI_SPI_MODALIAS 0x02
- __u8 name[32];
-} __packed;
-
-/**
- * struct gb_spi_transfer - a read/write buffer pair
- * @speed_hz: Select a speed other than the device default for this transfer. If
- * 0 the default (from @spi_device) is used.
- * @len: size of rx and tx buffers (in bytes)
- * @delay_usecs: microseconds to delay after this transfer before (optionally)
- * changing the chipselect status, then starting the next transfer or
- * completing this spi_message.
- * @cs_change: affects chipselect after this transfer completes
- * @bits_per_word: select a bits_per_word other than the device default for this
- * transfer. If 0 the default (from @spi_device) is used.
- */
-struct gb_spi_transfer {
- __le32 speed_hz;
- __le32 len;
- __le16 delay_usecs;
- __u8 cs_change;
- __u8 bits_per_word;
- __u8 xfer_flags;
-#define GB_SPI_XFER_READ 0x01
-#define GB_SPI_XFER_WRITE 0x02
-#define GB_SPI_XFER_INPROGRESS 0x04
-} __packed;
-
-struct gb_spi_transfer_request {
- __u8 chip_select; /* of the spi device */
- __u8 mode; /* of the spi device */
- __le16 count;
- struct gb_spi_transfer transfers[0]; /* count of these */
-} __packed;
-
-struct gb_spi_transfer_response {
- __u8 data[0]; /* inbound data */
-} __packed;
-
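A transfer request carries <count> gb_spi_transfer descriptors after its fixed header. The sketch below sizes such a request on the assumption that, as with the i2c transfer above, outgoing tx data for every transfer flagged GB_SPI_XFER_WRITE is appended after the descriptor array; the function name is hypothetical and the code assumes <linux/types.h> and <asm/byteorder.h>.

/* Illustrative only: total message size for a request carrying these transfers. */
static size_t gb_example_spi_request_size(const struct gb_spi_transfer *xfers,
					  unsigned int count)
{
	size_t size = sizeof(struct gb_spi_transfer_request) +
		      count * sizeof(struct gb_spi_transfer);
	unsigned int i;

	for (i = 0; i < count; i++)
		if (xfers[i].xfer_flags & GB_SPI_XFER_WRITE)
			size += le32_to_cpu(xfers[i].len);

	return size;
}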
-/* Version of the Greybus SVC protocol we support */
-#define GB_SVC_VERSION_MAJOR 0x00
-#define GB_SVC_VERSION_MINOR 0x01
-
-/* Greybus SVC request types */
-#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
-#define GB_SVC_TYPE_SVC_HELLO 0x02
-#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
-#define GB_SVC_TYPE_INTF_RESET 0x06
-#define GB_SVC_TYPE_CONN_CREATE 0x07
-#define GB_SVC_TYPE_CONN_DESTROY 0x08
-#define GB_SVC_TYPE_DME_PEER_GET 0x09
-#define GB_SVC_TYPE_DME_PEER_SET 0x0a
-#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
-#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
-#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
-#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
-#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
-#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
-#define GB_SVC_TYPE_INTF_EJECT 0x11
-#define GB_SVC_TYPE_PING 0x13
-#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
-#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
-#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
-#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
-#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
-#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
-#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
-#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
-#define GB_SVC_TYPE_MODULE_REMOVED 0x20
-#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
-#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
-#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
-#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
-#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
-#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
-#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
-#define GB_SVC_TYPE_INTF_RESUME 0x28
-#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
-#define GB_SVC_TYPE_INTF_OOPS 0x2a
-
-/* Greybus SVC protocol status values */
-#define GB_SVC_OP_SUCCESS 0x00
-#define GB_SVC_OP_UNKNOWN_ERROR 0x01
-#define GB_SVC_INTF_NOT_DETECTED 0x02
-#define GB_SVC_INTF_NO_UPRO_LINK 0x03
-#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
-#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
-#define GB_SVC_INTF_NO_V_SYS 0x06
-#define GB_SVC_INTF_V_CHG 0x07
-#define GB_SVC_INTF_WAKE_BUSY 0x08
-#define GB_SVC_INTF_NO_REFCLK 0x09
-#define GB_SVC_INTF_RELEASING 0x0a
-#define GB_SVC_INTF_NO_ORDER 0x0b
-#define GB_SVC_INTF_MBOX_SET 0x0c
-#define GB_SVC_INTF_BAD_MBOX 0x0d
-#define GB_SVC_INTF_OP_TIMEOUT 0x0e
-#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
-
-struct gb_svc_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_svc_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* SVC protocol hello request */
-struct gb_svc_hello_request {
- __le16 endo_id;
- __u8 interface_id;
-} __packed;
-/* hello response has no payload */
-
-struct gb_svc_intf_device_id_request {
- __u8 intf_id;
- __u8 device_id;
-} __packed;
-/* device id response has no payload */
-
-struct gb_svc_intf_reset_request {
- __u8 intf_id;
-} __packed;
-/* interface reset response has no payload */
-
-struct gb_svc_intf_eject_request {
- __u8 intf_id;
-} __packed;
-/* interface eject response has no payload */
-
-struct gb_svc_conn_create_request {
- __u8 intf1_id;
- __le16 cport1_id;
- __u8 intf2_id;
- __le16 cport2_id;
- __u8 tc;
- __u8 flags;
-} __packed;
-/* connection create response has no payload */
-
-struct gb_svc_conn_destroy_request {
- __u8 intf1_id;
- __le16 cport1_id;
- __u8 intf2_id;
- __le16 cport2_id;
-} __packed;
-/* connection destroy response has no payload */
-
-struct gb_svc_dme_peer_get_request {
- __u8 intf_id;
- __le16 attr;
- __le16 selector;
-} __packed;
-
-struct gb_svc_dme_peer_get_response {
- __le16 result_code;
- __le32 attr_value;
-} __packed;
-
-struct gb_svc_dme_peer_set_request {
- __u8 intf_id;
- __le16 attr;
- __le16 selector;
- __le32 value;
-} __packed;
-
-struct gb_svc_dme_peer_set_response {
- __le16 result_code;
-} __packed;
-
-/* Greybus init-status values, currently retrieved using DME peer gets. */
-#define GB_INIT_SPI_BOOT_STARTED 0x02
-#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
-#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
-#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
-#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
-#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
-
-struct gb_svc_route_create_request {
- __u8 intf1_id;
- __u8 dev1_id;
- __u8 intf2_id;
- __u8 dev2_id;
-} __packed;
-/* route create response has no payload */
-
-struct gb_svc_route_destroy_request {
- __u8 intf1_id;
- __u8 intf2_id;
-} __packed;
-/* route destroy response has no payload */
-
-/* used for svc_intf_vsys_{enable,disable} */
-struct gb_svc_intf_vsys_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_vsys_response {
- __u8 result_code;
-#define GB_SVC_INTF_VSYS_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_VSYS_FAIL 0x02
-} __packed;
-
-/* used for svc_intf_refclk_{enable,disable} */
-struct gb_svc_intf_refclk_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_refclk_response {
- __u8 result_code;
-#define GB_SVC_INTF_REFCLK_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_REFCLK_FAIL 0x02
-} __packed;
-
-/* used for svc_intf_unipro_{enable,disable} */
-struct gb_svc_intf_unipro_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_unipro_response {
- __u8 result_code;
-#define GB_SVC_INTF_UNIPRO_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_UNIPRO_FAIL 0x02
-#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
-} __packed;
-
-#define GB_SVC_UNIPRO_FAST_MODE 0x01
-#define GB_SVC_UNIPRO_SLOW_MODE 0x02
-#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
-#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
-#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
-#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
-#define GB_SVC_UNIPRO_OFF_MODE 0x12
-
-#define GB_SVC_SMALL_AMPLITUDE 0x01
-#define GB_SVC_LARGE_AMPLITUDE 0x02
-
-#define GB_SVC_NO_DE_EMPHASIS 0x00
-#define GB_SVC_SMALL_DE_EMPHASIS 0x01
-#define GB_SVC_LARGE_DE_EMPHASIS 0x02
-
-#define GB_SVC_PWRM_RXTERMINATION 0x01
-#define GB_SVC_PWRM_TXTERMINATION 0x02
-#define GB_SVC_PWRM_LINE_RESET 0x04
-#define GB_SVC_PWRM_SCRAMBLING 0x20
-
-#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
-
-#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
-#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
-
-#define GB_SVC_SETPWRM_PWR_OK 0x00
-#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
-#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
-#define GB_SVC_SETPWRM_PWR_BUSY 0x03
-#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
-#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
-
-struct gb_svc_l2_timer_cfg {
- __le16 tsb_fc0_protection_timeout;
- __le16 tsb_tc0_replay_timeout;
- __le16 tsb_afc0_req_timeout;
- __le16 tsb_fc1_protection_timeout;
- __le16 tsb_tc1_replay_timeout;
- __le16 tsb_afc1_req_timeout;
- __le16 reserved_for_tc2[3];
- __le16 reserved_for_tc3[3];
-} __packed;
-
-struct gb_svc_intf_set_pwrm_request {
- __u8 intf_id;
- __u8 hs_series;
- __u8 tx_mode;
- __u8 tx_gear;
- __u8 tx_nlanes;
- __u8 tx_amplitude;
- __u8 tx_hs_equalizer;
- __u8 rx_mode;
- __u8 rx_gear;
- __u8 rx_nlanes;
- __u8 flags;
- __le32 quirks;
- struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
-} __packed;
-
-struct gb_svc_intf_set_pwrm_response {
- __u8 result_code;
-} __packed;
-
-struct gb_svc_key_event_request {
- __le16 key_code;
-#define GB_KEYCODE_ARA 0x00
-
- __u8 key_event;
-#define GB_SVC_KEY_RELEASED 0x00
-#define GB_SVC_KEY_PRESSED 0x01
-} __packed;
-
-#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
-
-struct gb_svc_pwrmon_rail_count_get_response {
- __u8 rail_count;
-} __packed;
-
-#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32
-
-struct gb_svc_pwrmon_rail_names_get_response {
- __u8 status;
- __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
-} __packed;
-
-#define GB_SVC_PWRMON_TYPE_CURR 0x01
-#define GB_SVC_PWRMON_TYPE_VOL 0x02
-#define GB_SVC_PWRMON_TYPE_PWR 0x03
-
-#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
-#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
-#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
-#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
-
-struct gb_svc_pwrmon_sample_get_request {
- __u8 rail_id;
- __u8 measurement_type;
-} __packed;
-
-struct gb_svc_pwrmon_sample_get_response {
- __u8 result;
- __le32 measurement;
-} __packed;
-
-struct gb_svc_pwrmon_intf_sample_get_request {
- __u8 intf_id;
- __u8 measurement_type;
-} __packed;
-
-struct gb_svc_pwrmon_intf_sample_get_response {
- __u8 result;
- __le32 measurement;
-} __packed;
-
-#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
-
-struct gb_svc_module_inserted_request {
- __u8 primary_intf_id;
- __u8 intf_count;
- __le16 flags;
-} __packed;
-/* module_inserted response has no payload */
-
-struct gb_svc_module_removed_request {
- __u8 primary_intf_id;
-} __packed;
-/* module_removed response has no payload */
-
-struct gb_svc_intf_activate_request {
- __u8 intf_id;
-} __packed;
-
-#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
-#define GB_SVC_INTF_TYPE_DUMMY 0x01
-#define GB_SVC_INTF_TYPE_UNIPRO 0x02
-#define GB_SVC_INTF_TYPE_GREYBUS 0x03
-
-struct gb_svc_intf_activate_response {
- __u8 status;
- __u8 intf_type;
-} __packed;
-
-struct gb_svc_intf_resume_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_resume_response {
- __u8 status;
-} __packed;
-
-#define GB_SVC_INTF_MAILBOX_NONE 0x00
-#define GB_SVC_INTF_MAILBOX_AP 0x01
-#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
-
-struct gb_svc_intf_mailbox_event_request {
- __u8 intf_id;
- __le16 result_code;
- __le32 mailbox;
-} __packed;
-/* intf_mailbox_event response has no payload */
-
-struct gb_svc_intf_oops_request {
- __u8 intf_id;
- __u8 reason;
-} __packed;
-/* intf_oops response has no payload */
-
-
-/* RAW */
-
-/* Greybus raw request types */
-#define GB_RAW_TYPE_SEND 0x02
-
-struct gb_raw_send_request {
- __le32 len;
- __u8 data[0];
-} __packed;
-
-
-/* UART */
-
-/* Greybus UART operation types */
-#define GB_UART_TYPE_SEND_DATA 0x02
-#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
-#define GB_UART_TYPE_SET_LINE_CODING 0x04
-#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
-#define GB_UART_TYPE_SEND_BREAK 0x06
-#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
-#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
-#define GB_UART_TYPE_FLUSH_FIFOS 0x09
-
-/* Represents data from AP -> Module */
-struct gb_uart_send_data_request {
- __le16 size;
- __u8 data[0];
-} __packed;
-
-/* recv-data-request flags */
-#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
-#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
-#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
-#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
-
-/* Represents data from Module -> AP */
-struct gb_uart_recv_data_request {
- __le16 size;
- __u8 flags;
- __u8 data[0];
-} __packed;
-
-struct gb_uart_receive_credits_request {
- __le16 count;
-} __packed;
-
-struct gb_uart_set_line_coding_request {
- __le32 rate;
- __u8 format;
-#define GB_SERIAL_1_STOP_BITS 0
-#define GB_SERIAL_1_5_STOP_BITS 1
-#define GB_SERIAL_2_STOP_BITS 2
-
- __u8 parity;
-#define GB_SERIAL_NO_PARITY 0
-#define GB_SERIAL_ODD_PARITY 1
-#define GB_SERIAL_EVEN_PARITY 2
-#define GB_SERIAL_MARK_PARITY 3
-#define GB_SERIAL_SPACE_PARITY 4
-
- __u8 data_bits;
-
- __u8 flow_control;
-#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
-} __packed;
-
-/* output control lines */
-#define GB_UART_CTRL_DTR 0x01
-#define GB_UART_CTRL_RTS 0x02
-
-struct gb_uart_set_control_line_state_request {
- __u8 control;
-} __packed;
-
-struct gb_uart_set_break_request {
- __u8 state;
-} __packed;
-
-/* input control lines and line errors */
-#define GB_UART_CTRL_DCD 0x01
-#define GB_UART_CTRL_DSR 0x02
-#define GB_UART_CTRL_RI 0x04
-
-struct gb_uart_serial_state_request {
- __u8 control;
-} __packed;
-
-struct gb_uart_serial_flush_request {
- __u8 flags;
-#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
-#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
-} __packed;
-
-/* Loopback */
-
-/* Greybus loopback request types */
-#define GB_LOOPBACK_TYPE_PING 0x02
-#define GB_LOOPBACK_TYPE_TRANSFER 0x03
-#define GB_LOOPBACK_TYPE_SINK 0x04
-
-/*
- * Loopback request/response header format should be identical
- * to simplify bandwidth and data movement analysis.
- */
-struct gb_loopback_transfer_request {
- __le32 len;
- __le32 reserved0;
- __le32 reserved1;
- __u8 data[0];
-} __packed;
-
-struct gb_loopback_transfer_response {
- __le32 len;
- __le32 reserved0;
- __le32 reserved1;
- __u8 data[0];
-} __packed;
-
-/* SDIO */
-/* Greybus SDIO operation types */
-#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
-#define GB_SDIO_TYPE_SET_IOS 0x03
-#define GB_SDIO_TYPE_COMMAND 0x04
-#define GB_SDIO_TYPE_TRANSFER 0x05
-#define GB_SDIO_TYPE_EVENT 0x06
-
-/* get caps response: request has no payload */
-struct gb_sdio_get_caps_response {
- __le32 caps;
-#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
-#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
-#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
-#define GB_SDIO_CAP_MMC_HS 0x00000008
-#define GB_SDIO_CAP_SD_HS 0x00000010
-#define GB_SDIO_CAP_ERASE 0x00000020
-#define GB_SDIO_CAP_1_2V_DDR 0x00000040
-#define GB_SDIO_CAP_1_8V_DDR 0x00000080
-#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
-#define GB_SDIO_CAP_UHS_SDR12 0x00000200
-#define GB_SDIO_CAP_UHS_SDR25 0x00000400
-#define GB_SDIO_CAP_UHS_SDR50 0x00000800
-#define GB_SDIO_CAP_UHS_SDR104 0x00001000
-#define GB_SDIO_CAP_UHS_DDR50 0x00002000
-#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
-#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
-#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
-#define GB_SDIO_CAP_HS200_1_2V 0x00020000
-#define GB_SDIO_CAP_HS200_1_8V 0x00040000
-#define GB_SDIO_CAP_HS400_1_2V 0x00080000
-#define GB_SDIO_CAP_HS400_1_8V 0x00100000
-
- /* see possible values below at vdd */
- __le32 ocr;
- __le32 f_min;
- __le32 f_max;
- __le16 max_blk_count;
- __le16 max_blk_size;
-} __packed;
-
-/* set ios request: response has no payload */
-struct gb_sdio_set_ios_request {
- __le32 clock;
- __le32 vdd;
-#define GB_SDIO_VDD_165_195 0x00000001
-#define GB_SDIO_VDD_20_21 0x00000002
-#define GB_SDIO_VDD_21_22 0x00000004
-#define GB_SDIO_VDD_22_23 0x00000008
-#define GB_SDIO_VDD_23_24 0x00000010
-#define GB_SDIO_VDD_24_25 0x00000020
-#define GB_SDIO_VDD_25_26 0x00000040
-#define GB_SDIO_VDD_26_27 0x00000080
-#define GB_SDIO_VDD_27_28 0x00000100
-#define GB_SDIO_VDD_28_29 0x00000200
-#define GB_SDIO_VDD_29_30 0x00000400
-#define GB_SDIO_VDD_30_31 0x00000800
-#define GB_SDIO_VDD_31_32 0x00001000
-#define GB_SDIO_VDD_32_33 0x00002000
-#define GB_SDIO_VDD_33_34 0x00004000
-#define GB_SDIO_VDD_34_35 0x00008000
-#define GB_SDIO_VDD_35_36 0x00010000
-
- __u8 bus_mode;
-#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
-#define GB_SDIO_BUSMODE_PUSHPULL 0x01
-
- __u8 power_mode;
-#define GB_SDIO_POWER_OFF 0x00
-#define GB_SDIO_POWER_UP 0x01
-#define GB_SDIO_POWER_ON 0x02
-#define GB_SDIO_POWER_UNDEFINED 0x03
-
- __u8 bus_width;
-#define GB_SDIO_BUS_WIDTH_1 0x00
-#define GB_SDIO_BUS_WIDTH_4 0x02
-#define GB_SDIO_BUS_WIDTH_8 0x03
-
- __u8 timing;
-#define GB_SDIO_TIMING_LEGACY 0x00
-#define GB_SDIO_TIMING_MMC_HS 0x01
-#define GB_SDIO_TIMING_SD_HS 0x02
-#define GB_SDIO_TIMING_UHS_SDR12 0x03
-#define GB_SDIO_TIMING_UHS_SDR25 0x04
-#define GB_SDIO_TIMING_UHS_SDR50 0x05
-#define GB_SDIO_TIMING_UHS_SDR104 0x06
-#define GB_SDIO_TIMING_UHS_DDR50 0x07
-#define GB_SDIO_TIMING_MMC_DDR52 0x08
-#define GB_SDIO_TIMING_MMC_HS200 0x09
-#define GB_SDIO_TIMING_MMC_HS400 0x0A
-
- __u8 signal_voltage;
-#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
-#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
-#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
-
- __u8 drv_type;
-#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
-#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
-#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
-#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
-} __packed;
-
-/* command request */
-struct gb_sdio_command_request {
- __u8 cmd;
- __u8 cmd_flags;
-#define GB_SDIO_RSP_NONE 0x00
-#define GB_SDIO_RSP_PRESENT 0x01
-#define GB_SDIO_RSP_136 0x02
-#define GB_SDIO_RSP_CRC 0x04
-#define GB_SDIO_RSP_BUSY 0x08
-#define GB_SDIO_RSP_OPCODE 0x10
-
- __u8 cmd_type;
-#define GB_SDIO_CMD_AC 0x00
-#define GB_SDIO_CMD_ADTC 0x01
-#define GB_SDIO_CMD_BC 0x02
-#define GB_SDIO_CMD_BCR 0x03
-
- __le32 cmd_arg;
- __le16 data_blocks;
- __le16 data_blksz;
-} __packed;
-
-struct gb_sdio_command_response {
- __le32 resp[4];
-} __packed;
-
-/* transfer request */
-struct gb_sdio_transfer_request {
- __u8 data_flags;
-#define GB_SDIO_DATA_WRITE 0x01
-#define GB_SDIO_DATA_READ 0x02
-#define GB_SDIO_DATA_STREAM 0x04
-
- __le16 data_blocks;
- __le16 data_blksz;
- __u8 data[0];
-} __packed;
-
-struct gb_sdio_transfer_response {
- __le16 data_blocks;
- __le16 data_blksz;
- __u8 data[0];
-} __packed;
-
-/* event request: generated by module and is defined as unidirectional */
-struct gb_sdio_event_request {
- __u8 event;
-#define GB_SDIO_CARD_INSERTED 0x01
-#define GB_SDIO_CARD_REMOVED 0x02
-#define GB_SDIO_WP 0x04
-} __packed;
-
-/* Camera */
-
-/* Greybus Camera request types */
-#define GB_CAMERA_TYPE_CAPABILITIES 0x02
-#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
-#define GB_CAMERA_TYPE_CAPTURE 0x04
-#define GB_CAMERA_TYPE_FLUSH 0x05
-#define GB_CAMERA_TYPE_METADATA 0x06
-
-#define GB_CAMERA_MAX_STREAMS 4
-#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
-
-/* Greybus Camera Configure Streams request payload */
-struct gb_camera_stream_config_request {
- __le16 width;
- __le16 height;
- __le16 format;
- __le16 padding;
-} __packed;
-
-struct gb_camera_configure_streams_request {
- __u8 num_streams;
- __u8 flags;
-#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
- __le16 padding;
- struct gb_camera_stream_config_request config[0];
-} __packed;
-
-/* Greybus Camera Configure Streams response payload */
-struct gb_camera_stream_config_response {
- __le16 width;
- __le16 height;
- __le16 format;
- __u8 virtual_channel;
- __u8 data_type[2];
- __le16 max_pkt_size;
- __u8 padding;
- __le32 max_size;
-} __packed;
-
-struct gb_camera_configure_streams_response {
- __u8 num_streams;
-#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
- __u8 flags;
- __u8 padding[2];
- __le32 data_rate;
- struct gb_camera_stream_config_response config[0];
-};
-
-/* Greybus Camera Capture request payload - response has no payload */
-struct gb_camera_capture_request {
- __le32 request_id;
- __u8 streams;
- __u8 padding;
- __le16 num_frames;
- __u8 settings[0];
-} __packed;
-
-/* Greybus Camera Flush response payload - request has no payload */
-struct gb_camera_flush_response {
- __le32 request_id;
-} __packed;
-
-/* Greybus Camera Metadata request payload - operation has no response */
-struct gb_camera_metadata_request {
- __le32 request_id;
- __le16 frame_number;
- __u8 stream;
- __u8 padding;
- __u8 metadata[0];
-} __packed;
-
-/* Lights */
-
-/* Greybus Lights request types */
-#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
-#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
-#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
-#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
-#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
-#define GB_LIGHTS_TYPE_SET_BLINK 0x07
-#define GB_LIGHTS_TYPE_SET_COLOR 0x08
-#define GB_LIGHTS_TYPE_SET_FADE 0x09
-#define GB_LIGHTS_TYPE_EVENT 0x0A
-#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
-#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
-#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
-#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
-
-/* Greybus Light modes */
-
-/*
- * if you add any specific mode below, update also the
- * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
- */
-#define GB_CHANNEL_MODE_NONE 0x00000000
-#define GB_CHANNEL_MODE_BATTERY 0x00000001
-#define GB_CHANNEL_MODE_POWER 0x00000002
-#define GB_CHANNEL_MODE_WIRELESS 0x00000004
-#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
-#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
-#define GB_CHANNEL_MODE_BUTTONS 0x00000020
-#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
-#define GB_CHANNEL_MODE_ATTENTION 0x00000080
-#define GB_CHANNEL_MODE_FLASH 0x00000100
-#define GB_CHANNEL_MODE_TORCH 0x00000200
-#define GB_CHANNEL_MODE_INDICATOR 0x00000400
-
-/* Lights Mode valid bit values */
-#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
-#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
-
-/* Greybus Light Channels Flags */
-#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
-#define GB_LIGHT_CHANNEL_FADER 0x00000002
-#define GB_LIGHT_CHANNEL_BLINK 0x00000004
-
-/* get count of lights in module */
-struct gb_lights_get_lights_response {
- __u8 lights_count;
-} __packed;
-
-/* light config request payload */
-struct gb_lights_get_light_config_request {
- __u8 id;
-} __packed;
-
-/* light config response payload */
-struct gb_lights_get_light_config_response {
- __u8 channel_count;
- __u8 name[32];
-} __packed;
-
-/* channel config request payload */
-struct gb_lights_get_channel_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel flash config request payload */
-struct gb_lights_get_channel_flash_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel config response payload */
-struct gb_lights_get_channel_config_response {
- __u8 max_brightness;
- __le32 flags;
- __le32 color;
- __u8 color_name[32];
- __le32 mode;
- __u8 mode_name[32];
-} __packed;
-
-/* channel flash config response payload */
-struct gb_lights_get_channel_flash_config_response {
- __le32 intensity_min_uA;
- __le32 intensity_max_uA;
- __le32 intensity_step_uA;
- __le32 timeout_min_us;
- __le32 timeout_max_us;
- __le32 timeout_step_us;
-} __packed;
-
-/* blink request payload: response have no payload */
-struct gb_lights_blink_request {
- __u8 light_id;
- __u8 channel_id;
- __le16 time_on_ms;
- __le16 time_off_ms;
-} __packed;
-
-/* set brightness request payload: response have no payload */
-struct gb_lights_set_brightness_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 brightness;
-} __packed;
-
-/* set color request payload: response have no payload */
-struct gb_lights_set_color_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 color;
-} __packed;
-
-/* set fade request payload: response have no payload */
-struct gb_lights_set_fade_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 fade_in;
- __u8 fade_out;
-} __packed;
-
-/* event request: generated by module */
-struct gb_lights_event_request {
- __u8 light_id;
- __u8 event;
-#define GB_LIGHTS_LIGHT_CONFIG 0x01
-} __packed;
-
-/* set flash intensity request payload: response have no payload */
-struct gb_lights_set_flash_intensity_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 intensity_uA;
-} __packed;
-
-/* set flash strobe state request payload: response have no payload */
-struct gb_lights_set_flash_strobe_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 state;
-} __packed;
-
-/* set flash timeout request payload: response have no payload */
-struct gb_lights_set_flash_timeout_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 timeout_us;
-} __packed;
-
-/* get flash fault request payload */
-struct gb_lights_get_flash_fault_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* get flash fault response payload */
-struct gb_lights_get_flash_fault_response {
- __le32 fault;
-#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
-#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
-#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
-#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
-#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
-#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
-#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
-#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
-#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
-} __packed;
-
-/* Audio */
-
-#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
-#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
-#define GB_AUDIO_TYPE_GET_CONTROL 0x04
-#define GB_AUDIO_TYPE_SET_CONTROL 0x05
-#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
-#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
-#define GB_AUDIO_TYPE_GET_PCM 0x08
-#define GB_AUDIO_TYPE_SET_PCM 0x09
-#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
- /* 0x0b unused */
-#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
-#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
-#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
- /* 0x0f unused */
-#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
-#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
-#define GB_AUDIO_TYPE_JACK_EVENT 0x12
-#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
-#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
-#define GB_AUDIO_TYPE_SEND_DATA 0x15
-
-/* Module must be able to buffer 10ms of audio data, minimum */
-#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
-
-#define GB_AUDIO_PCM_NAME_MAX 32
-#define AUDIO_DAI_NAME_MAX 32
-#define AUDIO_CONTROL_NAME_MAX 32
-#define AUDIO_CTL_ELEM_NAME_MAX 44
-#define AUDIO_ENUM_NAME_MAX 64
-#define AUDIO_WIDGET_NAME_MAX 32
-
-/* See SNDRV_PCM_FMTBIT_* in Linux source */
-#define GB_AUDIO_PCM_FMT_S8 BIT(0)
-#define GB_AUDIO_PCM_FMT_U8 BIT(1)
-#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
-#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
-#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
-#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
-#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
-#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
-#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
-#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
-#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
-#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
-#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
-#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
-
-/* See SNDRV_PCM_RATE_* in Linux source */
-#define GB_AUDIO_PCM_RATE_5512 BIT(0)
-#define GB_AUDIO_PCM_RATE_8000 BIT(1)
-#define GB_AUDIO_PCM_RATE_11025 BIT(2)
-#define GB_AUDIO_PCM_RATE_16000 BIT(3)
-#define GB_AUDIO_PCM_RATE_22050 BIT(4)
-#define GB_AUDIO_PCM_RATE_32000 BIT(5)
-#define GB_AUDIO_PCM_RATE_44100 BIT(6)
-#define GB_AUDIO_PCM_RATE_48000 BIT(7)
-#define GB_AUDIO_PCM_RATE_64000 BIT(8)
-#define GB_AUDIO_PCM_RATE_88200 BIT(9)
-#define GB_AUDIO_PCM_RATE_96000 BIT(10)
-#define GB_AUDIO_PCM_RATE_176400 BIT(11)
-#define GB_AUDIO_PCM_RATE_192000 BIT(12)
-
-#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
-#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
-
-#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
-#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
-
-/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
-#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
-#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
-#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
-#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
-
-/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
-#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
-#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
-#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
-#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
-#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
-#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
-#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
-
-/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
-#define GB_AUDIO_ACCESS_READ BIT(0)
-#define GB_AUDIO_ACCESS_WRITE BIT(1)
-#define GB_AUDIO_ACCESS_VOLATILE BIT(2)
-#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3)
-#define GB_AUDIO_ACCESS_TLV_READ BIT(4)
-#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5)
-#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6)
-#define GB_AUDIO_ACCESS_INACTIVE BIT(7)
-#define GB_AUDIO_ACCESS_LOCK BIT(8)
-#define GB_AUDIO_ACCESS_OWNER BIT(9)
-
-/* enum snd_soc_dapm_type */
-#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0
-#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1
-#define GB_AUDIO_WIDGET_TYPE_MUX 0x2
-#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3
-#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4
-#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5
-#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6
-#define GB_AUDIO_WIDGET_TYPE_PGA 0x7
-#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8
-#define GB_AUDIO_WIDGET_TYPE_ADC 0x9
-#define GB_AUDIO_WIDGET_TYPE_DAC 0xa
-#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb
-#define GB_AUDIO_WIDGET_TYPE_MIC 0xc
-#define GB_AUDIO_WIDGET_TYPE_HP 0xd
-#define GB_AUDIO_WIDGET_TYPE_SPK 0xe
-#define GB_AUDIO_WIDGET_TYPE_LINE 0xf
-#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10
-#define GB_AUDIO_WIDGET_TYPE_VMID 0x11
-#define GB_AUDIO_WIDGET_TYPE_PRE 0x12
-#define GB_AUDIO_WIDGET_TYPE_POST 0x13
-#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14
-#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15
-#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16
-#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17
-#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18
-#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19
-#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a
-#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b
-#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c
-
-#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01
-#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02
-
-#define GB_AUDIO_JACK_EVENT_INSERTION 0x1
-#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2
-
-#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1
-#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2
-
-#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1
-#define GB_AUDIO_STREAMING_EVENT_HALT 0x2
-#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3
-#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4
-#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5
-#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6
-#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7
-#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8
-#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9
-
-#define GB_AUDIO_INVALID_INDEX 0xff
-
-/* enum snd_jack_types */
-#define GB_AUDIO_JACK_HEADPHONE 0x0000001
-#define GB_AUDIO_JACK_MICROPHONE 0x0000002
-#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \
- GB_AUDIO_JACK_MICROPHONE)
-#define GB_AUDIO_JACK_LINEOUT 0x0000004
-#define GB_AUDIO_JACK_MECHANICAL 0x0000008
-#define GB_AUDIO_JACK_VIDEOOUT 0x0000010
-#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \
- GB_AUDIO_JACK_VIDEOOUT)
-#define GB_AUDIO_JACK_LINEIN 0x0000020
-#define GB_AUDIO_JACK_OC_HPHL 0x0000040
-#define GB_AUDIO_JACK_OC_HPHR 0x0000080
-#define GB_AUDIO_JACK_MICROPHONE2 0x0000200
-#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \
- GB_AUDIO_JACK_MICROPHONE | \
- GB_AUDIO_JACK_MICROPHONE2)
-/* Kept separate from switches to facilitate implementation */
-#define GB_AUDIO_JACK_BTN_0 0x4000000
-#define GB_AUDIO_JACK_BTN_1 0x2000000
-#define GB_AUDIO_JACK_BTN_2 0x1000000
-#define GB_AUDIO_JACK_BTN_3 0x0800000
-
-struct gb_audio_pcm {
- __u8 stream_name[GB_AUDIO_PCM_NAME_MAX];
- __le32 formats; /* GB_AUDIO_PCM_FMT_* */
- __le32 rates; /* GB_AUDIO_PCM_RATE_* */
- __u8 chan_min;
- __u8 chan_max;
- __u8 sig_bits; /* number of bits of content */
-} __packed;
-
-struct gb_audio_dai {
- __u8 name[AUDIO_DAI_NAME_MAX];
- __le16 data_cport;
- struct gb_audio_pcm capture;
- struct gb_audio_pcm playback;
-} __packed;
-
-struct gb_audio_integer {
- __le32 min;
- __le32 max;
- __le32 step;
-} __packed;
-
-struct gb_audio_integer64 {
- __le64 min;
- __le64 max;
- __le64 step;
-} __packed;
-
-struct gb_audio_enumerated {
- __le32 items;
- __le16 names_length;
- __u8 names[0];
-} __packed;
-
-struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
- __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */
- __le16 dimen[4];
- union {
- struct gb_audio_integer integer;
- struct gb_audio_integer64 integer64;
- struct gb_audio_enumerated enumerated;
- } value;
-} __packed;
-
-struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
- __le64 timestamp; /* XXX needed? */
- union {
- __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */
- __le64 integer64_value[2];
- __le32 enumerated_item[2];
- } value;
-} __packed;
-
-struct gb_audio_control {
- __u8 name[AUDIO_CONTROL_NAME_MAX];
- __u8 id; /* 0-63 */
- __u8 iface; /* GB_AUDIO_IFACE_* */
- __le16 data_cport;
- __le32 access; /* GB_AUDIO_ACCESS_* */
- __u8 count; /* count of same elements */
- __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */
- struct gb_audio_ctl_elem_info info;
-} __packed;
-
-struct gb_audio_widget {
- __u8 name[AUDIO_WIDGET_NAME_MAX];
- __u8 sname[AUDIO_WIDGET_NAME_MAX];
- __u8 id;
- __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
- __u8 state; /* GB_AUDIO_WIDGET_STATE_* */
- __u8 ncontrols;
- struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
-} __packed;
-
-struct gb_audio_route {
- __u8 source_id; /* widget id */
- __u8 destination_id; /* widget id */
- __u8 control_id; /* 0-63 */
- __u8 index; /* Selection within the control */
-} __packed;
-
-struct gb_audio_topology {
- __u8 num_dais;
- __u8 num_controls;
- __u8 num_widgets;
- __u8 num_routes;
- __le32 size_dais;
- __le32 size_controls;
- __le32 size_widgets;
- __le32 size_routes;
- __le32 jack_type;
- /*
- * struct gb_audio_dai dai[num_dais];
- * struct gb_audio_control controls[num_controls];
- * struct gb_audio_widget widgets[num_widgets];
- * struct gb_audio_route routes[num_routes];
- */
- __u8 data[0];
-} __packed;
-
-struct gb_audio_get_topology_size_response {
- __le16 size;
-} __packed;
-
-struct gb_audio_get_topology_response {
- struct gb_audio_topology topology;
-} __packed;
-
-struct gb_audio_get_control_request {
- __u8 control_id;
- __u8 index;
-} __packed;
-
-struct gb_audio_get_control_response {
- struct gb_audio_ctl_elem_value value;
-} __packed;
-
-struct gb_audio_set_control_request {
- __u8 control_id;
- __u8 index;
- struct gb_audio_ctl_elem_value value;
-} __packed;
-
-struct gb_audio_enable_widget_request {
- __u8 widget_id;
-} __packed;
-
-struct gb_audio_disable_widget_request {
- __u8 widget_id;
-} __packed;
-
-struct gb_audio_get_pcm_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_get_pcm_response {
- __le32 format;
- __le32 rate;
- __u8 channels;
- __u8 sig_bits;
-} __packed;
-
-struct gb_audio_set_pcm_request {
- __le16 data_cport;
- __le32 format;
- __le32 rate;
- __u8 channels;
- __u8 sig_bits;
-} __packed;
-
-struct gb_audio_set_tx_data_size_request {
- __le16 data_cport;
- __le16 size;
-} __packed;
-
-struct gb_audio_activate_tx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_deactivate_tx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_set_rx_data_size_request {
- __le16 data_cport;
- __le16 size;
-} __packed;
-
-struct gb_audio_activate_rx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_deactivate_rx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_jack_event_request {
- __u8 widget_id;
- __u8 jack_attribute;
- __u8 event;
-} __packed;
-
-struct gb_audio_button_event_request {
- __u8 widget_id;
- __u8 button_id;
- __u8 event;
-} __packed;
-
-struct gb_audio_streaming_event_request {
- __le16 data_cport;
- __u8 event;
-} __packed;
-
-struct gb_audio_send_data_request {
- __le64 timestamp;
- __u8 data[0];
-} __packed;
-
-
-/* Log */
-
-/* operations */
-#define GB_LOG_TYPE_SEND_LOG 0x02
-
-/* length */
-#define GB_LOG_MAX_LEN 1024
-
-struct gb_log_send_log_request {
- __le16 len;
- __u8 msg[0];
-} __packed;
-
-#endif /* __GREYBUS_PROTOCOLS_H */
-
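
The loopback transfer request and response structures above end in a zero-length data[] member, so the payload lives in the operation message itself and the message must be sized for it at allocation time. A minimal sketch of how such a request might be built and sent, using only the helpers declared in the operation header removed later in this change (local variable names are illustrative, not taken from the driver):

	struct gb_operation *op;
	struct gb_loopback_transfer_request *request;

	/* room for the fixed header plus 'len' payload bytes each way */
	op = gb_operation_create(connection, GB_LOOPBACK_TYPE_TRANSFER,
				 sizeof(*request) + len,
				 sizeof(struct gb_loopback_transfer_response) + len,
				 GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->len = cpu_to_le32(len);
	memset(request->data, 0xa5, len);	/* arbitrary test pattern */

	ret = gb_operation_request_send_sync(op);
	gb_operation_put(op);
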
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
deleted file mode 100644
index 6cf024a20a58..000000000000
--- a/drivers/staging/greybus/hd.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Host Device
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#ifndef __HD_H
-#define __HD_H
-
-struct gb_host_device;
-struct gb_message;
-
-struct gb_hd_driver {
- size_t hd_priv_size;
-
- int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
- unsigned long flags);
- void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
- unsigned long flags);
- int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
- u8 phase, unsigned int timeout);
- int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
- size_t peer_space, unsigned int timeout);
- int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
-
- int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
- struct gb_message *message, gfp_t gfp_mask);
- void (*message_cancel)(struct gb_message *message);
- int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
- int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
- int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
- bool async);
-};
-
-struct gb_host_device {
- struct device dev;
- int bus_id;
- const struct gb_hd_driver *driver;
-
- struct list_head modules;
- struct list_head connections;
- struct ida cport_id_map;
-
- /* Number of CPorts supported by the UniPro IP */
- size_t num_cports;
-
- /* Host device buffer constraints */
- size_t buffer_size_max;
-
- struct gb_svc *svc;
- /* Private data for the host driver */
- unsigned long hd_priv[0] __aligned(sizeof(s64));
-};
-#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
-
-int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
-void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
-int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
- unsigned long flags);
-void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
-
-struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
- struct device *parent,
- size_t buffer_size_max,
- size_t num_cports);
-int gb_hd_add(struct gb_host_device *hd);
-void gb_hd_del(struct gb_host_device *hd);
-void gb_hd_shutdown(struct gb_host_device *hd);
-void gb_hd_put(struct gb_host_device *hd);
-int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
- bool in_irq);
-
-int gb_hd_init(void);
-void gb_hd_exit(void);
-
-#endif /* __HD_H */
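
The hd_priv[] trailer in gb_host_device above gives a host-controller driver a private area allocated together with the device, sized by the hd_priv_size it declares. A rough sketch of how a hypothetical host driver would create and register one (example_* names and the priv structure are assumptions for illustration only):

	static struct gb_hd_driver example_driver = {
		.hd_priv_size	= sizeof(struct example_hd_priv),
		.message_send	= example_message_send,
		.message_cancel	= example_message_cancel,
		.cport_enable	= example_cport_enable,
		.cport_disable	= example_cport_disable,
	};

	hd = gb_hd_create(&example_driver, &udev->dev, buffer_size_max, num_cports);
	if (IS_ERR(hd))
		return PTR_ERR(hd);

	/* driver-private area, allocated along with the host device */
	priv = (struct example_hd_priv *)&hd->hd_priv[0];

	ret = gb_hd_add(hd);
	if (ret)
		gb_hd_put(hd);
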
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 8ab810bf5716..04bfd9110502 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -12,8 +12,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
/* Greybus HID device's structure */
struct gb_hid {
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index b2522043a1a4..ab06fc3b9e7e 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_i2c_device {
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
deleted file mode 100644
index 1c00c5bb3ec9..000000000000
--- a/drivers/staging/greybus/interface.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Interface Block code
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __INTERFACE_H
-#define __INTERFACE_H
-
-enum gb_interface_type {
- GB_INTERFACE_TYPE_INVALID = 0,
- GB_INTERFACE_TYPE_UNKNOWN,
- GB_INTERFACE_TYPE_DUMMY,
- GB_INTERFACE_TYPE_UNIPRO,
- GB_INTERFACE_TYPE_GREYBUS,
-};
-
-#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
-#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
-#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
-#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
-#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
-#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
-#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
-
-struct gb_interface {
- struct device dev;
- struct gb_control *control;
-
- struct list_head bundles;
- struct list_head module_node;
- struct list_head manifest_descs;
- u8 interface_id; /* Physical location within the Endo */
- u8 device_id;
- u8 features; /* Feature flags set in the manifest */
-
- enum gb_interface_type type;
-
- u32 ddbl1_manufacturer_id;
- u32 ddbl1_product_id;
- u32 vendor_id;
- u32 product_id;
- u64 serial_number;
-
- struct gb_host_device *hd;
- struct gb_module *module;
-
- unsigned long quirks;
-
- struct mutex mutex;
-
- bool disconnected;
-
- bool ejected;
- bool removed;
- bool active;
- bool enabled;
- bool mode_switch;
- bool dme_read;
-
- struct work_struct mode_switch_work;
- struct completion mode_switch_completion;
-};
-#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
-
-struct gb_interface *gb_interface_create(struct gb_module *module,
- u8 interface_id);
-int gb_interface_activate(struct gb_interface *intf);
-void gb_interface_deactivate(struct gb_interface *intf);
-int gb_interface_enable(struct gb_interface *intf);
-void gb_interface_disable(struct gb_interface *intf);
-int gb_interface_add(struct gb_interface *intf);
-void gb_interface_del(struct gb_interface *intf);
-void gb_interface_put(struct gb_interface *intf);
-void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
- u32 mailbox);
-
-int gb_interface_request_mode_switch(struct gb_interface *intf);
-
-#endif /* __INTERFACE_H */
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 010ae1e9c7fb..d6ba25f21d80 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -11,11 +11,9 @@
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
#include <media/v4l2-flash-led-class.h>
-#include "greybus.h"
-#include "greybus_protocols.h"
-
#define NAMES_MAX 32
struct gb_channel {
@@ -1098,21 +1096,21 @@ static void gb_lights_channel_release(struct gb_channel *channel)
static void gb_lights_light_release(struct gb_light *light)
{
int i;
- int count;
light->ready = false;
- count = light->channels_count;
-
if (light->has_flash)
gb_lights_light_v4l2_unregister(light);
+ light->has_flash = false;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < light->channels_count; i++)
gb_lights_channel_release(&light->channels[i]);
- light->channels_count--;
- }
+ light->channels_count = 0;
+
kfree(light->channels);
+ light->channels = NULL;
kfree(light->name);
+ light->name = NULL;
}
static void gb_lights_release(struct gb_lights *glights)
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 15a88574dbb0..971f36dccac6 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -9,8 +9,7 @@
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_log {
struct gb_connection *connection;
@@ -31,14 +30,14 @@ static int gb_log_request_handler(struct gb_operation *op)
/* Verify size of payload */
if (op->request->payload_size < sizeof(*receive)) {
dev_err(dev, "log request too small (%zu < %zu)\n",
- op->request->payload_size, sizeof(*receive));
+ op->request->payload_size, sizeof(*receive));
return -EINVAL;
}
receive = op->request->payload;
len = le16_to_cpu(receive->len);
if (len != (op->request->payload_size - sizeof(*receive))) {
dev_err(dev, "log request wrong size %d vs %zu\n", len,
- (op->request->payload_size - sizeof(*receive)));
+ (op->request->payload_size - sizeof(*receive)));
return -EINVAL;
}
if (len == 0) {
@@ -83,7 +82,7 @@ static int gb_log_probe(struct gb_bundle *bundle,
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
- gb_log_request_handler);
+ gb_log_request_handler);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto error_free;
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 48d85ebe404a..583d9708a191 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -25,12 +25,9 @@
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
-
+#include <linux/greybus.h>
#include <asm/div64.h>
-#include "greybus.h"
-#include "connection.h"
-
#define NSEC_PER_DAY 86400000000000ULL
struct gb_loopback_stats {
@@ -882,7 +879,7 @@ static int gb_loopback_fn(void *data)
gb->type = 0;
gb->send_count = 0;
sysfs_notify(&gb->dev->kobj, NULL,
- "iteration_count");
+ "iteration_count");
dev_dbg(&bundle->dev, "load test complete\n");
} else {
dev_dbg(&bundle->dev,
@@ -1054,7 +1051,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
/* Allocate kfifo */
if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
- GFP_KERNEL)) {
+ GFP_KERNEL)) {
retval = -ENOMEM;
goto out_conn;
}
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
deleted file mode 100644
index f3c95a255631..000000000000
--- a/drivers/staging/greybus/manifest.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus manifest parsing
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __MANIFEST_H
-#define __MANIFEST_H
-
-struct gb_interface;
-bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
-
-#endif /* __MANIFEST_H */
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
deleted file mode 100644
index b1ebcc6636db..000000000000
--- a/drivers/staging/greybus/module.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Module code
- *
- * Copyright 2016 Google Inc.
- * Copyright 2016 Linaro Ltd.
- */
-
-#ifndef __MODULE_H
-#define __MODULE_H
-
-struct gb_module {
- struct device dev;
- struct gb_host_device *hd;
-
- struct list_head hd_node;
-
- u8 module_id;
- size_t num_interfaces;
-
- bool disconnected;
-
- struct gb_interface *interfaces[0];
-};
-#define to_gb_module(d) container_of(d, struct gb_module, dev)
-
-struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
- size_t num_interfaces);
-int gb_module_add(struct gb_module *module);
-void gb_module_del(struct gb_module *module);
-void gb_module_put(struct gb_module *module);
-
-#endif /* __MODULE_H */
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
deleted file mode 100644
index 40b7b02fff88..000000000000
--- a/drivers/staging/greybus/operation.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus operations
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __OPERATION_H
-#define __OPERATION_H
-
-#include <linux/completion.h>
-
-struct gb_operation;
-
-/* The default amount of time a request is given to complete */
-#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
-
-/*
- * The top bit of the type in an operation message header indicates
- * whether the message is a request (bit clear) or response (bit set)
- */
-#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
-
-enum gb_operation_result {
- GB_OP_SUCCESS = 0x00,
- GB_OP_INTERRUPTED = 0x01,
- GB_OP_TIMEOUT = 0x02,
- GB_OP_NO_MEMORY = 0x03,
- GB_OP_PROTOCOL_BAD = 0x04,
- GB_OP_OVERFLOW = 0x05,
- GB_OP_INVALID = 0x06,
- GB_OP_RETRY = 0x07,
- GB_OP_NONEXISTENT = 0x08,
- GB_OP_UNKNOWN_ERROR = 0xfe,
- GB_OP_MALFUNCTION = 0xff,
-};
-
-#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
-#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
-
-/*
- * Protocol code should only examine the payload and payload_size fields, and
- * host-controller drivers may use the hcpriv field. All other fields are
- * intended to be private to the operations core code.
- */
-struct gb_message {
- struct gb_operation *operation;
- struct gb_operation_msg_hdr *header;
-
- void *payload;
- size_t payload_size;
-
- void *buffer;
-
- void *hcpriv;
-};
-
-#define GB_OPERATION_FLAG_INCOMING BIT(0)
-#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
-#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
-#define GB_OPERATION_FLAG_CORE BIT(3)
-
-#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
- GB_OPERATION_FLAG_UNIDIRECTIONAL)
-
-/*
- * A Greybus operation is a remote procedure call performed over a
- * connection between two UniPro interfaces.
- *
- * Every operation consists of a request message sent to the other
- * end of the connection coupled with a reply message returned to
- * the sender. Every operation has a type, whose interpretation is
- * dependent on the protocol associated with the connection.
- *
- * Only four things in an operation structure are intended to be
- * directly usable by protocol handlers: the operation's connection
- * pointer; the operation type; the request message payload (and
- * size); and the response message payload (and size). Note that a
- * message with a 0-byte payload has a null message payload pointer.
- *
- * In addition, every operation has a result, which is an errno
- * value. Protocol handlers access the operation result using
- * gb_operation_result().
- */
-typedef void (*gb_operation_callback)(struct gb_operation *);
-struct gb_operation {
- struct gb_connection *connection;
- struct gb_message *request;
- struct gb_message *response;
-
- unsigned long flags;
- u8 type;
- u16 id;
- int errno; /* Operation result */
-
- struct work_struct work;
- gb_operation_callback callback;
- struct completion completion;
- struct timer_list timer;
-
- struct kref kref;
- atomic_t waiters;
-
- int active;
- struct list_head links; /* connection->operations */
-
- void *private;
-};
-
-static inline bool
-gb_operation_is_incoming(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_INCOMING;
-}
-
-static inline bool
-gb_operation_is_unidirectional(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
-}
-
-static inline bool
-gb_operation_short_response_allowed(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
-}
-
-static inline bool gb_operation_is_core(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_CORE;
-}
-
-void gb_connection_recv(struct gb_connection *connection,
- void *data, size_t size);
-
-int gb_operation_result(struct gb_operation *operation);
-
-size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
-struct gb_operation *
-gb_operation_create_flags(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, unsigned long flags,
- gfp_t gfp);
-
-static inline struct gb_operation *
-gb_operation_create(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, gfp_t gfp)
-{
- return gb_operation_create_flags(connection, type, request_size,
- response_size, 0, gfp);
-}
-
-struct gb_operation *
-gb_operation_create_core(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, unsigned long flags,
- gfp_t gfp);
-
-void gb_operation_get(struct gb_operation *operation);
-void gb_operation_put(struct gb_operation *operation);
-
-bool gb_operation_response_alloc(struct gb_operation *operation,
- size_t response_size, gfp_t gfp);
-
-int gb_operation_request_send(struct gb_operation *operation,
- gb_operation_callback callback,
- unsigned int timeout,
- gfp_t gfp);
-int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
- unsigned int timeout);
-static inline int
-gb_operation_request_send_sync(struct gb_operation *operation)
-{
- return gb_operation_request_send_sync_timeout(operation,
- GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-void gb_operation_cancel(struct gb_operation *operation, int errno);
-void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
-
-void greybus_message_sent(struct gb_host_device *hd,
- struct gb_message *message, int status);
-
-int gb_operation_sync_timeout(struct gb_connection *connection, int type,
- void *request, int request_size,
- void *response, int response_size,
- unsigned int timeout);
-int gb_operation_unidirectional_timeout(struct gb_connection *connection,
- int type, void *request, int request_size,
- unsigned int timeout);
-
-static inline int gb_operation_sync(struct gb_connection *connection, int type,
- void *request, int request_size,
- void *response, int response_size)
-{
- return gb_operation_sync_timeout(connection, type,
- request, request_size, response, response_size,
- GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-static inline int gb_operation_unidirectional(struct gb_connection *connection,
- int type, void *request, int request_size)
-{
- return gb_operation_unidirectional_timeout(connection, type,
- request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-static inline void *gb_operation_get_data(struct gb_operation *operation)
-{
- return operation->private;
-}
-
-static inline void gb_operation_set_data(struct gb_operation *operation,
- void *data)
-{
- operation->private = data;
-}
-
-int gb_operation_init(void);
-void gb_operation_exit(void);
-
-#endif /* !__OPERATION_H */
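
For simple request/response exchanges, protocol drivers normally go through the gb_operation_sync() wrapper declared above rather than managing gb_operation objects by hand. A sketch using the lights set-brightness request defined earlier in this change (the local variables are illustrative; the operation defines no response payload, hence the NULL/0 response arguments):

	struct gb_lights_set_brightness_request req;

	req.light_id = light_id;
	req.channel_id = channel_id;
	req.brightness = brightness;

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
				&req, sizeof(req), NULL, 0);
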
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 34b40a409ea3..ec96f28887f9 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -10,8 +10,7 @@
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
#define PROP_MAX 32
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index 4a6d394b6c44..891a6a672378 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_pwm_chip {
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 838acbe84ca0..64a17dfe3b6e 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -13,8 +13,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/uaccess.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_raw {
struct gb_connection *connection;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index a097a8916b3b..68c5718be827 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -12,8 +12,8 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_sdio_host {
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
index 47d896992b35..68e8d272db6d 100644
--- a/drivers/staging/greybus/spi.c
+++ b/drivers/staging/greybus/spi.c
@@ -7,8 +7,8 @@
*/
#include <linux/module.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#include "spilib.h"
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index 2e07c6b41334..fc27c52de74a 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -10,9 +10,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
#include <linux/spi/spi.h>
-#include "greybus.h"
#include "spilib.h"
struct gb_spilib {
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
index 043d4d32c3ee..9d416839e3be 100644
--- a/drivers/staging/greybus/spilib.h
+++ b/drivers/staging/greybus/spilib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus SPI library header
*
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
deleted file mode 100644
index ad01783bac9c..000000000000
--- a/drivers/staging/greybus/svc.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus SVC code
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __SVC_H
-#define __SVC_H
-
-#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
-#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
-#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
-
-enum gb_svc_state {
- GB_SVC_STATE_RESET,
- GB_SVC_STATE_PROTOCOL_VERSION,
- GB_SVC_STATE_SVC_HELLO,
-};
-
-enum gb_svc_watchdog_bite {
- GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
- GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
-};
-
-struct gb_svc_watchdog;
-
-struct svc_debugfs_pwrmon_rail {
- u8 id;
- struct gb_svc *svc;
-};
-
-struct gb_svc {
- struct device dev;
-
- struct gb_host_device *hd;
- struct gb_connection *connection;
- enum gb_svc_state state;
- struct ida device_id_map;
- struct workqueue_struct *wq;
-
- u16 endo_id;
- u8 ap_intf_id;
-
- u8 protocol_major;
- u8 protocol_minor;
-
- struct gb_svc_watchdog *watchdog;
- enum gb_svc_watchdog_bite action;
-
- struct dentry *debugfs_dentry;
- struct svc_debugfs_pwrmon_rail *pwrmon_rails;
-};
-#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
-
-struct gb_svc *gb_svc_create(struct gb_host_device *hd);
-int gb_svc_add(struct gb_svc *svc);
-void gb_svc_del(struct gb_svc *svc);
-void gb_svc_put(struct gb_svc *svc);
-
-int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
- u8 measurement_type, u32 *value);
-int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
-int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
- u8 intf2_id, u8 dev2_id);
-void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
-int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
- u8 intf2_id, u16 cport2_id, u8 cport_flags);
-void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
- u8 intf2_id, u16 cport2_id);
-int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
-int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
-int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
-
-int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
- u32 *value);
-int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
- u32 value);
-int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
- u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
- u8 tx_amplitude, u8 tx_hs_equalizer,
- u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
- u8 flags, u32 quirks,
- struct gb_svc_l2_timer_cfg *local,
- struct gb_svc_l2_timer_cfg *remote);
-int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
-int gb_svc_ping(struct gb_svc *svc);
-int gb_svc_watchdog_create(struct gb_svc *svc);
-void gb_svc_watchdog_destroy(struct gb_svc *svc);
-bool gb_svc_watchdog_enabled(struct gb_svc *svc);
-int gb_svc_watchdog_enable(struct gb_svc *svc);
-int gb_svc_watchdog_disable(struct gb_svc *svc);
-
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
-#endif /* __SVC_H */
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index cebc1d90a180..ba6f905f26fa 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -4,8 +4,6 @@
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Provided under the three clause BSD license found in the LICENSE file.
*/
#include <errno.h>
#include <fcntl.h>
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index b3bffe91ae99..55c51143bb09 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -28,8 +28,8 @@
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#define GB_NUM_MINORS 16 /* 16 is more than enough */
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
index 1c246c73a085..8e9d9d59a357 100644
--- a/drivers/staging/greybus/usb.c
+++ b/drivers/staging/greybus/usb.c
@@ -10,8 +10,8 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
/* Greybus USB request types */
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 3e5dedeacd5c..0e2b188e5ca3 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -13,8 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_vibrator_device {
struct gb_connection *connection;
diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
index c124a836db27..738122afc2ae 100644
--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c
+++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c
@@ -53,15 +53,15 @@ struct core_table_entry {
static
void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val)
{
- cte->type = ((read_val & 0xFFF0000000000000) >> 52);
- cte->offset = ((read_val & 0x00000000FFFF0000) >> 16) * 4096;
- cte->length = ((read_val & 0x0000FFFF00000000) >> 32) * 8;
- cte->s2c_dma_present = ((read_val & 0x0008000000000000) >> 51);
- cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000) >> 48);
- cte->c2s_dma_present = ((read_val & 0x0000000000008000) >> 15);
- cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000) >> 12);
- cte->irq_count = ((read_val & 0x0000000000000C00) >> 10);
- cte->irq_base_num = ((read_val & 0x00000000000003F8) >> 3);
+ cte->type = ((read_val & 0xFFF0000000000000UL) >> 52);
+ cte->offset = ((read_val & 0x00000000FFFF0000UL) >> 16) * 4096;
+ cte->length = ((read_val & 0x0000FFFF00000000UL) >> 32) * 8;
+ cte->s2c_dma_present = ((read_val & 0x0008000000000000UL) >> 51);
+ cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000UL) >> 48);
+ cte->c2s_dma_present = ((read_val & 0x0000000000008000UL) >> 15);
+ cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000UL) >> 12);
+ cte->irq_count = ((read_val & 0x0000000000000C00UL) >> 10);
+ cte->irq_base_num = ((read_val & 0x00000000000003F8UL) >> 3);
}
static
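
The UL suffixes added above make the 64-bit width of these mask constants explicit before they are applied to the u64 register value. For reference only, an equivalent way to express the same field extractions with the kernel's bitfield helpers could look like this (a sketch, not what the driver does):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* same fields as the open-coded masks and shifts above */
	cte->type   = FIELD_GET(GENMASK_ULL(63, 52), read_val);
	cte->offset = FIELD_GET(GENMASK_ULL(31, 16), read_val) * 4096;
	cte->length = FIELD_GET(GENMASK_ULL(47, 32), read_val) * 8;
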
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index cb05cca687e1..0a23727d0dc3 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -205,7 +205,7 @@ static void wait_and_read_ssid(struct kp2000_device *pcard)
u64 read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
unsigned long timeout;
- if (read_val & 0x8000000000000000) {
+ if (read_val & 0x8000000000000000UL) {
pcard->ssid = read_val;
return;
}
@@ -213,7 +213,7 @@ static void wait_and_read_ssid(struct kp2000_device *pcard)
timeout = jiffies + (HZ * 2);
do {
read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
- if (read_val & 0x8000000000000000) {
+ if (read_val & 0x8000000000000000UL) {
pcard->ssid = read_val;
return;
}
@@ -241,16 +241,16 @@ static int read_system_regs(struct kp2000_device *pcard)
}
read_val = readq(pcard->sysinfo_regs_base + REG_CARD_ID_AND_BUILD);
- pcard->card_id = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->build_version = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->card_id = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->build_version = (read_val & 0x00000000FFFFFFFFUL) >> 0;
read_val = readq(pcard->sysinfo_regs_base + REG_DATE_AND_TIME_STAMPS);
- pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->build_timestamp = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->build_timestamp = (read_val & 0x00000000FFFFFFFFUL) >> 0;
read_val = readq(pcard->sysinfo_regs_base + REG_CORE_TABLE_OFFSET);
- pcard->core_table_length = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->core_table_offset = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->core_table_length = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->core_table_offset = (read_val & 0x00000000FFFFFFFFUL) >> 0;
wait_and_read_ssid(pcard);
@@ -401,7 +401,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
goto err_release_dma;
// Disable all "user" interrupts because they're not used yet.
- writeq(0xFFFFFFFFFFFFFFFF,
+ writeq(0xFFFFFFFFFFFFFFFFUL,
pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
// let the card master PCIe
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index b108da4ac633..bc02534d8dc3 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -123,9 +123,9 @@ struct i2c_device {
// FIXME!
#undef inb_p
-#define inb_p(a) readq((void *)a)
+#define inb_p(a) readq((void __iomem *)a)
#undef outb_p
-#define outb_p(d, a) writeq(d, (void *)a)
+#define outb_p(d, a) writeq(d, (void __iomem *)a)
/* Make sure the SMBus host is ready to start transmitting.
* Return 0 if it is, -EBUSY if it is not.
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 48ca88bc6b0b..cb52bd9a6d2f 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -146,15 +146,15 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
card_addr += desc->DescByteCount;
dma_addr = sg_dma_address(sg) + (p * 0x80000);
- desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0;
- desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;
+ desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFFUL) >> 0;
+ desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32;
user_ctl = acd->priv->user_ctl;
if (i == acd->mapped_entry_count-1 && p == pcnt-1) {
user_ctl = acd->priv->user_ctl_last;
}
- desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0;
- desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
+ desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0;
+ desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32;
if (i == acd->mapped_entry_count-1 && p == pcnt-1)
desc->acd = acd;
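
The masks above split a 64-bit DMA address and user-control word into 32-bit halves for the descriptor. The kernel also provides lower_32_bits()/upper_32_bits() for exactly this, so a hypothetical equivalent formulation would be:

	desc->DescSystemAddrLS  = lower_32_bits(dma_addr);
	desc->DescSystemAddrMS  = upper_32_bits(dma_addr);
	desc->DescUserControlLS = lower_32_bits(user_ctl);
	desc->DescUserControlMS = upper_32_bits(user_ctl);
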
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index b9841adb7181..8e9a0b67c6ed 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -303,7 +303,8 @@ static ssize_t set_datatype_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
- return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
+ return snprintf(buf, PAGE_SIZE, "%s",
+ ch_data_type[i].name);
}
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
@@ -721,6 +722,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
return link_channel_to_component(c, comp, link_name, comp_param);
}
+
/**
* remove_link_store - store function for remove_link attribute
* @drv: device driver
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 342f390d68b3..79817061fcfa 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -802,8 +802,11 @@ static int __init audio_init(void)
if (ret)
pr_err("Failed to register %s\n", comp.name);
ret = most_register_configfs_subsys(&comp);
- if (ret)
+ if (ret) {
pr_err("Failed to register %s configfs subsys\n", comp.name);
+ most_deregister_component(&comp);
+ }
+
return ret;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 620da6c003d8..d4278361e002 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1373,11 +1373,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
/* append to first fragment frame's tail (if privacy frame, pull the ICV) */
skb_trim(prframe->pkt, prframe->pkt->len - prframe->attrib.icv_len);
- /* memcpy */
- memcpy(skb_tail_pointer(prframe->pkt), pnfhdr->pkt->data,
- pnfhdr->pkt->len);
-
- skb_put(prframe->pkt, pnfhdr->pkt->len);
+ skb_put_data(prframe->pkt, pnfhdr->pkt->data, pnfhdr->pkt->len);
prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = plist->next;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index eedf2cd831d1..aaab0d577453 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -122,8 +122,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
precvframe->pkt = pkt_copy;
skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
- memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
- skb_put(precvframe->pkt, skb_len);
+ skb_put_data(pkt_copy, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
} else {
DBG_88E("%s: alloc_skb fail , drop frag frame\n",
__func__);
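
Both conversions above rely on skb_put_data(), which extends the skb tail and copies the data in one call. It is roughly shorthand for the open-coded pair it replaces:

	/* skb_put_data(skb, data, len) is approximately: */
	memcpy(skb_put(skb, len), data, len);
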
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 11528d17bb3c..1007eea6c8fc 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -15,6 +15,7 @@ config RTLLIB_CRYPTO_CCMP
tristate "Support for rtllib CCMP crypto"
depends on RTLLIB
select CRYPTO_AES
+ select CRYPTO_CCM
default y
help
CCMP crypto driver for rtllib.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 1b7e3fda7905..20e494186c9e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -618,7 +618,7 @@ static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev,
static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- bool bHighpowerstate, viviflag = false;
+ bool viviflag = false;
struct dcmd_txcmd tx_cmd;
u8 powerlevelOFDM24G;
int i = 0, j = 0, k = 0;
@@ -632,7 +632,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
- bHighpowerstate = priv->bDynamicTxHighPower;
powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
RF_Type = priv->rf_type;
@@ -1901,7 +1900,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
u8 cck_default_Rx = 0x2;
u8 cck_optional_Rx = 0x3;
long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
- u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0;
+ u8 cck_rx_ver2_max_index = 0;
u8 cck_rx_ver2_sec_index = 0;
u8 cur_rf_rssi;
long cur_cck_pwdb;
@@ -1984,7 +1983,6 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
if (rf_num == 1) {
cck_rx_ver2_max_index = i;
- cck_rx_ver2_min_index = i;
cck_rx_ver2_sec_index = i;
tmp_cck_max_pwdb = cur_cck_pwdb;
tmp_cck_min_pwdb = cur_cck_pwdb;
@@ -1997,7 +1995,6 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
tmp_cck_sec_pwdb = cur_cck_pwdb;
tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
- cck_rx_ver2_min_index = i;
}
} else {
if (cur_cck_pwdb > tmp_cck_max_pwdb) {
@@ -2027,13 +2024,10 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
(cur_cck_pwdb > tmp_cck_min_pwdb)) {
;
} else if (cur_cck_pwdb == tmp_cck_min_pwdb) {
- if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
+ if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
} else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 2581ed6d14fa..0cbf4a1a326b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -17,6 +17,7 @@
#include "rtllib.h"
#include <linux/crypto.h>
+#include <crypto/aead.h>
#include <linux/scatterlist.h>
@@ -39,20 +40,13 @@ struct rtllib_ccmp_data {
int key_idx;
- struct crypto_tfm *tfm;
+ struct crypto_aead *tfm;
/* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
- tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
- u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+ u8 tx_aad[2 * AES_BLOCK_LEN];
+ u8 rx_aad[2 * AES_BLOCK_LEN];
};
-static void rtllib_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
-{
- crypto_cipher_encrypt_one((void *)tfm, ct, pt);
-}
-
static void *rtllib_ccmp_init(int key_idx)
{
struct rtllib_ccmp_data *priv;
@@ -62,7 +56,7 @@ static void *rtllib_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
+ priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
pr_debug("Could not allocate crypto API aes\n");
priv->tfm = NULL;
@@ -73,7 +67,7 @@ static void *rtllib_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
- crypto_free_cipher((void *)priv->tfm);
+ crypto_free_aead(priv->tfm);
kfree(priv);
}
@@ -86,31 +80,18 @@ static void rtllib_ccmp_deinit(void *priv)
struct rtllib_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
- crypto_free_cipher((void *)_priv->tfm);
+ crypto_free_aead(_priv->tfm);
kfree(priv);
}
-static inline void xor_block(u8 *b, u8 *a, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- b[i] ^= a[i];
-}
-
-
-
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct rtllib_hdr_4addr *hdr,
- u8 *pn, size_t dlen, u8 *b0, u8 *auth,
- u8 *s0)
+static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
+ u8 *pn, u8 *iv, u8 *aad)
{
u8 *pos, qc = 0;
size_t aad_len;
u16 fc;
int a4_included, qc_included;
- u8 aad[2 * AES_BLOCK_LEN];
fc = le16_to_cpu(hdr->frame_ctl);
a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
@@ -128,18 +109,19 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc = *pos & 0x0f;
aad_len += 2;
}
- /* CCM Initial Block:
- * Flag (Include authentication header, M=3 (8-octet MIC),
- * L=1 (2-octet Dlen))
- * Nonce: 0x00 | A2 | PN
- * Dlen
+ /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+ * mode authentication are not allowed to collide, yet both are derived
+ * from the same vector. We only set L := 1 here to indicate that the
+ * data size can be represented in (L+1) bytes. The CCM layer will take
+ * care of storing the data length in the top (L+1) bytes and setting
+ * and clearing the other bits as is required to derive the two IVs.
*/
- b0[0] = 0x59;
- b0[1] = qc;
- memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
- memcpy(b0 + 8, pn, CCMP_PN_LEN);
- b0[14] = (dlen >> 8) & 0xff;
- b0[15] = dlen & 0xff;
+ iv[0] = 0x1;
+
+ /* Nonce: QC | A2 | PN */
+ iv[1] = qc;
+ memcpy(iv + 2, hdr->addr2, ETH_ALEN);
+ memcpy(iv + 8, pn, CCMP_PN_LEN);
/* AAD:
* FC with bits 4..6 and 11..13 masked to zero; 14 is always one
@@ -149,31 +131,21 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
* QC (if present)
*/
pos = (u8 *) hdr;
- aad[0] = 0; /* aad_len >> 8 */
- aad[1] = aad_len & 0xff;
- aad[2] = pos[0] & 0x8f;
- aad[3] = pos[1] & 0xc7;
- memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+ aad[0] = pos[0] & 0x8f;
+ aad[1] = pos[1] & 0xc7;
+ memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
pos = (u8 *) &hdr->seq_ctl;
- aad[22] = pos[0] & 0x0f;
- aad[23] = 0; /* all bits masked */
- memset(aad + 24, 0, 8);
+ aad[20] = pos[0] & 0x0f;
+ aad[21] = 0; /* all bits masked */
+ memset(aad + 22, 0, 8);
if (a4_included)
- memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+ memcpy(aad + 22, hdr->addr4, ETH_ALEN);
if (qc_included) {
- aad[a4_included ? 30 : 24] = qc;
+ aad[a4_included ? 28 : 22] = qc;
/* rest of QC masked */
}
- /* Start with the first block and AAD */
- rtllib_ccmp_aes_encrypt(tfm, b0, auth);
- xor_block(auth, aad, AES_BLOCK_LEN);
- rtllib_ccmp_aes_encrypt(tfm, auth, auth);
- xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
- rtllib_ccmp_aes_encrypt(tfm, auth, auth);
- b0[0] &= 0x07;
- b0[14] = b0[15] = 0;
- rtllib_ccmp_aes_encrypt(tfm, b0, s0);
+ return aad_len;
}
@@ -181,7 +153,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct rtllib_ccmp_data *key = priv;
- int data_len, i;
+ int i;
u8 *pos;
struct rtllib_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
@@ -191,7 +163,6 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- data_len = skb->len - hdr_len;
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
@@ -213,40 +184,37 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
-
hdr = (struct rtllib_hdr_4addr *) skb->data;
if (!tcb_desc->bHwSec) {
- int blocks, last, len;
- u8 *mic;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
-
- mic = skb_put(skb, CCMP_MIC_LEN);
-
- ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len,
- b0, b, s0);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Authentication */
- xor_block(b, pos, len);
- rtllib_ccmp_aes_encrypt(key->tfm, b, b);
- /* Encryption, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- rtllib_ccmp_aes_encrypt(key->tfm, b0, e);
- xor_block(pos, e, len);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->tx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ int data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
+
+ skb_put(skb, CCMP_MIC_LEN);
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
+ data_len + CCMP_MIC_LEN);
- for (i = 0; i < CCMP_MIC_LEN; i++)
- mic[i] = b[i] ^ s0[i];
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_encrypt(req);
+ aead_request_free(req);
+
+ return ret;
}
+
return 0;
}
@@ -302,35 +270,31 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -4;
}
if (!tcb_desc->bHwSec) {
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN -
- CCMP_MIC_LEN;
- u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
- u8 *b0 = key->rx_b0;
- u8 *b = key->rx_b;
- u8 *a = key->rx_a;
- int i, blocks, last, len;
-
-
- ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
- xor_block(mic, b, CCMP_MIC_LEN);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Decrypt, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- rtllib_ccmp_aes_encrypt(key->tfm, b0, b);
- xor_block(pos, b, len);
- /* Authentication */
- xor_block(a, pos, len);
- rtllib_ccmp_aes_encrypt(key->tfm, a, a);
- pos += len;
- }
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->rx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], pos, data_len);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_decrypt(req);
+ aead_request_free(req);
- if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+ if (ret) {
if (net_ratelimit()) {
pr_debug("CCMP: decrypt failed: STA= %pM\n",
hdr->addr2);
@@ -354,7 +318,7 @@ static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
struct rtllib_ccmp_data *data = priv;
int keyidx;
- struct crypto_tfm *tfm = data->tfm;
+ struct crypto_aead *tfm = data->tfm;
keyidx = data->key_idx;
memset(data, 0, sizeof(*data));
@@ -371,7 +335,9 @@ static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
data->rx_pn[4] = seq[1];
data->rx_pn[5] = seq[0];
}
- crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
+ if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) ||
+ crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN))
+ return -1;
} else if (len == 0) {
data->key_set = 0;
} else {
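For reference, the nonce handed to "ccm(aes)" is laid out exactly as ccmp_init_iv_and_aad() above builds it: a flags byte of 0x1 (leaving a 2-octet length field for the CCM layer to fill in), followed by QC | A2 | PN, with the final two IV bytes left zero. The AAD buffer no longer carries the 2-byte length prefix the old hand-rolled CBC-MAC needed; its length is now passed separately through aead_request_set_ad(). A minimal userspace sketch of that nonce layout — the sample address and packet number are invented for illustration, this is not driver code — looks like:

/*
 * Illustrative userspace sketch of the CCMP nonce layout built by
 * ccmp_init_iv_and_aad() above; the address and PN values are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_LEN	16
#define CCMP_PN_LEN	6
#define ETH_ALEN	6

int main(void)
{
	uint8_t iv[AES_BLOCK_LEN] = { 0 };
	uint8_t a2[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };   /* sample transmitter address */
	uint8_t pn[CCMP_PN_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; /* sample packet number */
	uint8_t qc = 0;			/* QoS TID; 0 for non-QoS frames */
	int i;

	iv[0] = 0x1;			/* L' = 1: 2-octet length field, filled in by the CCM layer */
	iv[1] = qc;			/* nonce octet 0 */
	memcpy(iv + 2, a2, ETH_ALEN);	/* nonce octets 1..6 */
	memcpy(iv + 8, pn, CCMP_PN_LEN);	/* nonce octets 7..12 */
	/* iv[14] and iv[15] stay zero; ccm(aes) writes the data length there */

	for (i = 0; i < AES_BLOCK_LEN; i++)
		printf("%02x ", iv[i]);
	printf("\n");
	return 0;
}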
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 22c2165e8b1c..1edca5c304fb 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -6,3 +6,5 @@ config RTL8192U
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CCM
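These selects pull in the generic AES cipher and the CCM template so that the crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC) calls in the converted CCMP code can actually be satisfied at runtime. As an aside (not part of this patch), a driver could probe for that availability with the existing crypto_has_alg() helper; a sketch:

#include <linux/crypto.h>

/* Sketch only: nonzero when a synchronous "ccm(aes)" AEAD can be instantiated. */
static int ccmp_aead_available(void)
{
	return crypto_has_alg("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
}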
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 130ddfe9868f..bc642076b96f 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -12,7 +12,7 @@ void rtl8192u_dot11d_init(struct ieee80211_device *ieee)
dot11d_info->state = DOT11D_STATE_NONE;
dot11d_info->country_ie_len = 0;
memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
RESET_CIE_WATCHDOG(ieee);
netdev_info(ieee->dev, "rtl8192u_dot11d_init()\n");
@@ -25,8 +25,8 @@ void dot11d_reset(struct ieee80211_device *ieee)
u32 i;
struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
/* Clear old channel map */
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
/* Set new channel map */
for (i = 1; i <= 11; i++)
(dot11d_info->channel_map)[i] = 1;
@@ -56,8 +56,8 @@ void dot11d_update_country_ie(struct ieee80211_device *dev, u8 *pTaddr,
u8 i, j, NumTriples, MaxChnlNum;
struct chnl_txpower_triple *pTriple;
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
MaxChnlNum = 0;
NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (struct chnl_txpower_triple *)(pCoutryIe + 3);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 3963a08b9eb2..9576b647f6b1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -169,9 +169,9 @@ struct cb_desc {
#define MGN_MCS14 0x8e
#define MGN_MCS15 0x8f
-#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A || \
+#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A || \
priv->ieee80211->current_network.mode == IEEE_N_24G || \
- priv->ieee80211->current_network.mode == IEEE_N_5G) ? \
+ priv->ieee80211->current_network.mode == IEEE_N_5G) ? \
16 : 10)
#define MGMT_QUEUE_NUM 5
@@ -387,7 +387,7 @@ struct ieee_param {
#define IEEE80211_STYPE_ACK 0x00D0
#define IEEE80211_STYPE_CFEND 0x00E0
#define IEEE80211_STYPE_CFENDACK 0x00F0
-#define IEEE80211_STYPE_BLOCKACK 0x0094
+#define IEEE80211_STYPE_BLOCKACK 0x0094
/* data */
#define IEEE80211_STYPE_DATA 0x0000
@@ -452,23 +452,23 @@ do { if (ieee80211_debug_level & (level)) \
printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
//wb added to debug out data buf
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
-#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do { if ((ieee80211_debug_level & (level)) == (level)) \
- { \
- int i; \
- u8 *pdata = (u8 *) data; \
- printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for (i = 0; i < (int)(datalen); i++) \
- { \
- printk("%2x ", pdata[i]); \
- if ((i + 1) % 16 == 0) printk("\n"); \
- } \
- printk("\n"); \
- } \
+#define IEEE80211_DEBUG_DATA(level, data, datalen) \
+ do { if ((ieee80211_debug_level & (level)) == (level)) \
+ { \
+ int i; \
+ u8 *pdata = (u8 *)data; \
+ printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
+ for (i = 0; i < (int)(datalen); i++) { \
+ printk("%2x ", pdata[i]); \
+ if ((i + 1) % 16 == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
} while (0)
#else
#define IEEE80211_DEBUG (level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA (level, data, datalen) do {} while(0)
+#define IEEE80211_DEBUG_DATA (level, data, datalen) do {} while (0)
#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -1649,10 +1649,8 @@ struct ieee80211_device {
struct list_head Rx_TS_Pending_List;
struct list_head Rx_TS_Unused_List;
struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
-//#ifdef TO_DO_LIST
struct rx_reorder_entry RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
-//#endif
// Qos related. Added by Annie, 2005-11-01.
// PSTA_QOS pStaQos;
u8 ForcedPriority; // Force per-packet priority 1~7. (default: 0, not to force it.)
@@ -2015,8 +2013,8 @@ struct ieee80211_device {
#define IEEE_A (1<<0)
#define IEEE_B (1<<1)
#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
+#define IEEE_N_24G (1<<4)
+#define IEEE_N_5G (1<<5)
#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Generate a 802.11 header */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 36987fccac5d..01012dddcd73 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -176,7 +176,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
}
-static void *ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
+static void *ieee80211_crypt_null_init(int keyidx) { return (void *)1; }
static void ieee80211_crypt_null_deinit(void *priv) {}
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index d7188b3f3190..c241cf484023 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -19,6 +19,7 @@
#include "ieee80211.h"
#include <linux/crypto.h>
+#include <crypto/aead.h>
#include <linux/scatterlist.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -44,20 +45,13 @@ struct ieee80211_ccmp_data {
int key_idx;
- struct crypto_tfm *tfm;
+ struct crypto_aead *tfm;
/* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
- tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
- u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+ u8 tx_aad[2 * AES_BLOCK_LEN];
+ u8 rx_aad[2 * AES_BLOCK_LEN];
};
-static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
-{
- crypto_cipher_encrypt_one((void *)tfm, ct, pt);
-}
-
static void *ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
@@ -67,7 +61,7 @@ static void *ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
+ priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
pr_debug("ieee80211_crypt_ccmp: could not allocate crypto API aes\n");
priv->tfm = NULL;
@@ -79,7 +73,7 @@ static void *ieee80211_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
- crypto_free_cipher((void *)priv->tfm);
+ crypto_free_aead(priv->tfm);
kfree(priv);
}
@@ -91,28 +85,17 @@ static void ieee80211_ccmp_deinit(void *priv)
struct ieee80211_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
- crypto_free_cipher((void *)_priv->tfm);
+ crypto_free_aead(_priv->tfm);
kfree(priv);
}
-static inline void xor_block(u8 *b, u8 *a, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- b[i] ^= a[i];
-}
-
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct rtl_80211_hdr_4addr *hdr,
- u8 *pn, size_t dlen, u8 *b0, u8 *auth,
- u8 *s0)
+static int ccmp_init_iv_and_aad(struct rtl_80211_hdr_4addr *hdr,
+ u8 *pn, u8 *iv, u8 *aad)
{
u8 *pos, qc = 0;
size_t aad_len;
u16 fc;
int a4_included, qc_included;
- u8 aad[2 * AES_BLOCK_LEN];
fc = le16_to_cpu(hdr->frame_ctl);
a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
@@ -133,18 +116,20 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc = *pos & 0x0f;
aad_len += 2;
}
- /* CCM Initial Block:
- * Flag (Include authentication header, M=3 (8-octet MIC),
- * L=1 (2-octet Dlen))
- * Nonce: 0x00 | A2 | PN
- * Dlen
+
+ /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+ * mode authentication are not allowed to collide, yet both are derived
+ * from the same vector. We only set L := 1 here to indicate that the
+ * data size can be represented in (L+1) bytes. The CCM layer will take
+ * care of storing the data length in the top (L+1) bytes and setting
+ * and clearing the other bits as is required to derive the two IVs.
*/
- b0[0] = 0x59;
- b0[1] = qc;
- memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
- memcpy(b0 + 8, pn, CCMP_PN_LEN);
- b0[14] = (dlen >> 8) & 0xff;
- b0[15] = dlen & 0xff;
+ iv[0] = 0x1;
+
+ /* Nonce: QC | A2 | PN */
+ iv[1] = qc;
+ memcpy(iv + 2, hdr->addr2, ETH_ALEN);
+ memcpy(iv + 8, pn, CCMP_PN_LEN);
/* AAD:
* FC with bits 4..6 and 11..13 masked to zero; 14 is always one
@@ -154,38 +139,27 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
* QC (if present)
*/
pos = (u8 *)hdr;
- aad[0] = 0; /* aad_len >> 8 */
- aad[1] = aad_len & 0xff;
- aad[2] = pos[0] & 0x8f;
- aad[3] = pos[1] & 0xc7;
- memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+ aad[0] = pos[0] & 0x8f;
+ aad[1] = pos[1] & 0xc7;
+ memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
pos = (u8 *)&hdr->seq_ctl;
- aad[22] = pos[0] & 0x0f;
- aad[23] = 0; /* all bits masked */
- memset(aad + 24, 0, 8);
+ aad[20] = pos[0] & 0x0f;
+ aad[21] = 0; /* all bits masked */
+ memset(aad + 22, 0, 8);
if (a4_included)
- memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+ memcpy(aad + 22, hdr->addr4, ETH_ALEN);
if (qc_included) {
- aad[a4_included ? 30 : 24] = qc;
+ aad[a4_included ? 28 : 22] = qc;
/* rest of QC masked */
}
- /* Start with the first block and AAD */
- ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
- xor_block(auth, aad, AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- b0[0] &= 0x07;
- b0[14] = 0;
- b0[15] = 0;
- ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
+ return aad_len;
}
static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_ccmp_data *key = priv;
- int data_len, i;
+ int i;
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
@@ -195,7 +169,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- data_len = skb->len - hdr_len;
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
@@ -220,36 +193,34 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
- int blocks, last, len;
- u8 *mic;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
-
- /* mic is moved to here by john */
- mic = skb_put(skb, CCMP_MIC_LEN);
-
- ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Authentication */
- xor_block(b, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
- /* Encryption, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
- xor_block(pos, e, len);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->tx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
- for (i = 0; i < CCMP_MIC_LEN; i++)
- mic[i] = b[i] ^ s0[i];
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
+
+ skb_put(skb, CCMP_MIC_LEN);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
+ data_len + CCMP_MIC_LEN);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_encrypt(req);
+ aead_request_free(req);
+
+ return ret;
}
return 0;
}
@@ -309,33 +280,31 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -4;
}
if (!tcb_desc->bHwSec) {
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
- u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
- u8 *b0 = key->rx_b0;
- u8 *b = key->rx_b;
- u8 *a = key->rx_a;
- int i, blocks, last, len;
-
- ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
- xor_block(mic, b, CCMP_MIC_LEN);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Decrypt, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
- xor_block(pos, b, len);
- /* Authentication */
- xor_block(a, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->rx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], pos, data_len);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_decrypt(req);
+ aead_request_free(req);
- if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+ if (ret) {
if (net_ratelimit()) {
netdev_dbg(skb->dev, "CCMP: decrypt failed: STA=%pM\n",
hdr->addr2);
@@ -358,12 +327,11 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_ccmp_data *data = priv;
int keyidx;
- struct crypto_tfm *tfm = data->tfm;
+ struct crypto_aead *tfm = data->tfm;
keyidx = data->key_idx;
memset(data, 0, sizeof(*data));
data->key_idx = keyidx;
- data->tfm = tfm;
if (len == CCMP_TK_LEN) {
memcpy(data->key, key, CCMP_TK_LEN);
data->key_set = 1;
@@ -375,7 +343,9 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
data->rx_pn[4] = seq[1];
data->rx_pn[5] = seq[0];
}
- crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
+ if (crypto_aead_setauthsize(tfm, CCMP_MIC_LEN) ||
+ crypto_aead_setkey(tfm, data->key, CCMP_TK_LEN))
+ return -1;
} else if (len == 0) {
data->key_set = 0;
} else {
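Both encrypt paths in these two files now follow the same AEAD calling pattern: allocate a synchronous "ccm(aes)" transform, set the 8-byte MIC size and the 16-byte temporal key, describe [ AAD | payload with room for the MIC ] in a two-entry scatterlist, and run the request in place. Condensed into one self-contained helper for readability — the function name, parameters, and fixed lengths are illustrative; this is a sketch of the pattern, not code from either driver:

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

static int ccmp_encrypt_sketch(const u8 *key, u8 *aad, unsigned int aad_len,
			       u8 *data, unsigned int data_len, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg[2];
	int ret;

	/* CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation */
	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 8-byte MIC and 16-byte temporal key, as in the set_key paths above */
	ret = crypto_aead_setauthsize(tfm, 8);
	if (!ret)
		ret = crypto_aead_setkey(tfm, key, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* src == dst: [ AAD | payload, with 8 spare bytes for the MIC ] */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], aad, aad_len);
	sg_set_buf(&sg[1], data, data_len + 8);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_ad(req, aad_len);
	aead_request_set_crypt(req, sg, sg, data_len, iv);

	ret = crypto_aead_encrypt(req);
	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}

Because the transform is synchronous, crypto_aead_encrypt() returns the final status directly, which is why the drivers pass a NULL completion callback and use the return value as-is.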
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 0927b2b15151..6f4710171151 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -160,7 +160,7 @@ static inline u16 Hi16(u32 val)
static inline u16 Mk16(u8 hi, u8 lo)
{
- return lo | (((u16) hi) << 8);
+ return lo | (((u16)hi) << 8);
}
static const u16 Sbox[256] = {
@@ -238,7 +238,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
* Make temporary area overlap WEP seed so that the final copy can be
* avoided on little endian hosts.
*/
- u16 *PPK = (u16 *) &WEPSeed[4];
+ u16 *PPK = (u16 *)&WEPSeed[4];
/* Step 1 - make copy of TTAK and bring in TSC */
PPK[0] = TTAK[0];
@@ -299,7 +299,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
if (!tkey->tx_phase1_done) {
@@ -343,7 +343,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len+4);
+ sg_init_one(&sg, pos, len + 4);
skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -383,7 +383,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & BIT(5))) {
@@ -435,7 +435,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen+4);
+ sg_init_one(&sg, pos, plen + 4);
skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -523,7 +523,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
struct rtl_80211_hdr_4addr *hdr11;
- hdr11 = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr11 = (struct rtl_80211_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_TODS:
@@ -556,7 +556,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
printk(KERN_DEBUG "Invalid packet for Michael MIC add "
@@ -599,7 +599,7 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
@@ -609,7 +609,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
u8 mic[8];
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tkey->key_set)
return -1;
@@ -626,7 +626,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 805493a0870d..26482c3dcd1c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len+4);
+ sg_init_one(&sg, pos, len + 4);
skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -192,7 +192,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen+4);
+ sg_init_one(&sg, pos, plen + 4);
skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 7ef1e89de269..5c33bcb0db2e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -103,17 +103,17 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
u8 tid;
if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else {
- tid = 0;
+ tid = 0;
}
if (frag == 0) {
@@ -170,17 +170,17 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
u8 tid;
if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else {
- tid = 0;
+ tid = 0;
}
entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
@@ -218,8 +218,8 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
rx_stats->len = skb->len;
ieee80211_rx_mgt(ieee, (struct rtl_80211_hdr_4addr *)skb->data, rx_stats);
/* if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) */
- if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))/* use ADDR1 to perform address matching for Management frames */
- {
+ if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) {
+ /* use ADDR1 to perform address matching for Management frames */
dev_kfree_skb_any(skb);
return 0;
}
@@ -339,8 +339,7 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (!crypt || !crypt->ops->decrypt_mpdu)
return 0;
- if (ieee->hwsec_active)
- {
+ if (ieee->hwsec_active) {
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
@@ -386,8 +385,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
if (!crypt || !crypt->ops->decrypt_msdu)
return 0;
- if (ieee->hwsec_active)
- {
+ if (ieee->hwsec_active) {
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
@@ -427,17 +425,17 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
//TO2DS and QoS
if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else { // no QoS
- tid = 0;
+ tid = 0;
}
switch (ieee->iw_mode) {
@@ -507,8 +505,7 @@ drop:
static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *pReorderEntry)
{
struct list_head *pList = &pTS->rx_pending_pkt_list;
- while (pList->next != &pTS->rx_pending_pkt_list)
- {
+ while (pList->next != &pTS->rx_pending_pkt_list) {
if (SN_LESS(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
pList = pList->next;
else if (SN_EQUAL(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
@@ -530,8 +527,7 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
u16 ethertype;
// if(index > 1)
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__func__,index);
- for (j = 0; j < index; j++)
- {
+ for (j = 0; j < index; j++) {
//added by amy for reorder
struct ieee80211_rxb *prxb = prxbIndicateArray[j];
for (i = 0; i < prxb->nr_subframes; i++) {
@@ -699,8 +695,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): start RREORDER indicate\n", __func__);
pReorderEntry = list_entry(pTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
- {
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq)) {
/* This protect buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
@@ -922,8 +917,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
- if (HTCCheck(ieee, skb->data))
- {
+ if (HTCCheck(ieee, skb->data)) {
if (net_ratelimit())
printk("find HTCControl\n");
hdrlen += 4;
@@ -999,7 +993,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
// if QoS enabled, should check the sequence for each of the AC
if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
if (is_duplicate_packet(ieee, hdr))
- goto rx_dropped;
+ goto rx_dropped;
} else {
struct rx_ts_record *pRxTS = NULL;
@@ -1010,8 +1004,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
hdr->addr2,
Frame_QoSTID((u8 *)(skb->data)),
RX_DIR,
- true))
- {
+ true)) {
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->rx_last_frag_num is %d,frag is %d,pRxTS->rx_last_seq_num is %d,seq is %d\n",__func__,pRxTS->rx_last_frag_num,frag,pRxTS->rx_last_seq_num,WLAN_GET_SEQ_SEQ(sc));
if ((fc & (1 << 11)) &&
@@ -1119,8 +1112,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
- {
+ (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
printk("decrypt frame error\n");
goto rx_dropped;
}
@@ -1185,8 +1177,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
- {
+ ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
printk("==>decrypt msdu error\n");
goto rx_dropped;
}
@@ -1220,10 +1211,10 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef CONFIG_IEEE80211_DEBUG
if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
+ struct eapol *eap = (struct eapol *)(skb->data +
+ 24);
+ IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+ eap_get_type(eap->type));
}
#endif
@@ -1243,13 +1234,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
*/
//added by amy for reorder
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
- && !is_multicast_ether_addr(hdr->addr1))
- {
+ && !is_multicast_ether_addr(hdr->addr1)) {
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID, RX_DIR, true);
- if (TID != 0 && TID != 3)
- {
+ GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID, RX_DIR, true);
+ if (TID != 0 && TID != 3) {
ieee->bis_any_nonbepkts = true;
}
}
@@ -1549,15 +1538,12 @@ static inline void ieee80211_extract_country_ie(
u8 *addr2
)
{
- if (IS_DOT11D_ENABLE(ieee))
- {
- if (info_element->len != 0)
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (info_element->len != 0) {
memcpy(network->CountryIeBuf, info_element->data, info_element->len);
network->CountryIeLen = info_element->len;
- if (!IS_COUNTRY_IE_VALID(ieee))
- {
+ if (!IS_COUNTRY_IE_VALID(ieee)) {
dot11d_update_country_ie(ieee, addr2, info_element->len, info_element->data);
}
}
@@ -1567,8 +1553,7 @@ static inline void ieee80211_extract_country_ie(
// some AP (e.g. Cisco 1242) don't include country IE in their
// probe response frame.
//
- if (IS_EQUAL_CIE_SRC(ieee, addr2))
- {
+ if (IS_EQUAL_CIE_SRC(ieee, addr2)) {
UPDATE_CIE_WATCHDOG(ieee);
}
}
@@ -1785,13 +1770,13 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x033){
- tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htcap_len != 0) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
- sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
- }
+ tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htcap_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
+ sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
+ memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
+ }
}
if (tmp_htcap_len != 0)
network->bssht.bdSupportHT = true;
@@ -1807,17 +1792,17 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x034){
- tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htinfo_len != 0) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- if (tmp_htinfo_len) {
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
- sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
- }
-
+ tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htinfo_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ if (tmp_htinfo_len) {
+ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
+ sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
+ memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
}
+ }
+
}
}
@@ -1837,7 +1822,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
network->bssht.bdRT2RTAggregation = true;
if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
- network->bssht.bdRT2RTLongSlotTime = true;
+ network->bssht.bdRT2RTLongSlotTime = true;
}
}
@@ -1858,15 +1843,14 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[1] == 0x10 &&
info_element->data[2] == 0x18)){
- network->broadcom_cap_exist = true;
+ network->broadcom_cap_exist = true;
}
}
if (info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x0c &&
- info_element->data[2] == 0x43)
- {
+ info_element->data[2] == 0x43) {
network->ralink_cap_exist = true;
} else
network->ralink_cap_exist = false;
@@ -1878,8 +1862,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x13 &&
- info_element->data[2] == 0x74))
- {
+ info_element->data[2] == 0x74)) {
printk("========>%s(): athros AP is exist\n", __func__);
network->atheros_cap_exist = true;
} else
@@ -1888,8 +1871,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
if (info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96)
- {
+ info_element->data[2] == 0x96) {
network->cisco_cap_exist = true;
} else
network->cisco_cap_exist = false;
@@ -1898,22 +1880,18 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x01)
- {
- if (info_element->len == 6)
- {
+ info_element->data[3] == 0x01) {
+ if (info_element->len == 6) {
memcpy(network->CcxRmState, &info_element[4], 2);
if (network->CcxRmState[0] != 0)
- {
network->bCcxRmEnable = true;
- } else
+ else
network->bCcxRmEnable = false;
//
// CCXv4 Table 59-1 MBSSID Masks.
//
network->MBssidMask = network->CcxRmState[1] & 0x07;
- if (network->MBssidMask != 0)
- {
+ if (network->MBssidMask != 0) {
network->bMBssidValid = true;
network->MBssidMask = 0xff << (network->MBssidMask);
ether_addr_copy(network->MBssid, network->bssid);
@@ -1929,8 +1907,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x03)
- {
+ info_element->data[3] == 0x03) {
if (info_element->len == 5) {
network->bWithCcxVerNum = true;
network->BssCcxVerNumber = info_element->data[4];
@@ -1985,16 +1962,14 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
case MFIE_TYPE_AIRONET:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
info_element->len);
- if (info_element->len > IE_CISCO_FLAG_POSITION)
- {
+ if (info_element->len > IE_CISCO_FLAG_POSITION) {
network->bWithAironetIE = true;
// CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23):
// "A Cisco access point advertises support for CKIP in beacon and probe response packets,
// by adding an Aironet element and setting one or both of the CKIP negotiation bits."
if ((info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_MIC) ||
- (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK))
- {
+ (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK)) {
network->bCkipSupported = true;
} else {
network->bCkipSupported = false;
@@ -2214,8 +2189,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->rates_len = src->rates_len;
memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
dst->rates_ex_len = src->rates_ex_len;
- if (src->ssid_len > 0)
- {
+ if (src->ssid_len > 0) {
memset(dst->ssid, 0, dst->ssid_len);
dst->ssid_len = src->ssid_len;
memcpy(dst->ssid, src->ssid, src->ssid_len);
@@ -2224,8 +2198,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->flags = src->flags;
dst->time_stamp[0] = src->time_stamp[0];
dst->time_stamp[1] = src->time_stamp[1];
- if (src->flags & NETWORK_HAS_ERP_VALUE)
- {
+ if (src->flags & NETWORK_HAS_ERP_VALUE) {
dst->erp_value = src->erp_value;
dst->berp_info_valid = src->berp_info_valid = true;
}
@@ -2289,7 +2262,7 @@ static inline void update_network(struct ieee80211_network *dst,
src->wmm_param[1].aci_aifsn || \
src->wmm_param[2].aci_aifsn || \
src->wmm_param[3].aci_aifsn) {
- memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
+ memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
}
//dst->QoS_Enable = src->QoS_Enable;
#ifdef THOMAS_TURBO
@@ -2379,41 +2352,33 @@ static inline void ieee80211_process_probe_response(
if (!is_legal_channel(ieee, network->channel))
goto out;
- if (ieee->bGlobalDomain)
- {
- if (fc == IEEE80211_STYPE_PROBE_RESP)
- {
- // Case 1: Country code
+ if (ieee->bGlobalDomain) {
+ if (fc == IEEE80211_STYPE_PROBE_RESP) {
if (IS_COUNTRY_IE_VALID(ieee)) {
+ // Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
goto out;
}
- }
- // Case 2: No any country code.
- else
- {
+ } else {
+ // Case 2: No any country code.
// Filter over channel ch12~14
- if (network->channel > 11)
- {
+ if (network->channel > 11) {
printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
goto out;
}
}
} else {
- // Case 1: Country code
if (IS_COUNTRY_IE_VALID(ieee)) {
+ // Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
goto out;
}
- }
- // Case 2: No any country code.
- else
- {
+ } else {
+ // Case 2: No any country code.
// Filter over channel ch12~14
- if (network->channel > 14)
- {
+ if (network->channel > 14) {
printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
goto out;
}
@@ -2436,14 +2401,13 @@ static inline void ieee80211_process_probe_response(
if (is_same_network(&ieee->current_network, network, ieee)) {
update_network(&ieee->current_network, network);
if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
- && ieee->current_network.berp_info_valid){
- if (ieee->current_network.erp_value & ERP_UseProtection)
- ieee->current_network.buseprotection = true;
- else
- ieee->current_network.buseprotection = false;
+ && ieee->current_network.berp_info_valid){
+ if (ieee->current_network.erp_value & ERP_UseProtection)
+ ieee->current_network.buseprotection = true;
+ else
+ ieee->current_network.buseprotection = false;
}
- if (is_beacon(beacon->header.frame_ctl))
- {
+ if (is_beacon(beacon->header.frame_ctl)) {
if (ieee->state == IEEE80211_LINKED)
ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
} else //hidden AP
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index e0da0900a4f7..33a6af7aad22 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -743,7 +743,6 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- crypt = ieee->crypt[ieee->tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 4a8d16a45fc5..b1baaa18b129 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -42,8 +42,8 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
/* if setting by freq convert to channel */
if (fwrq->e == 1) {
- if ((fwrq->m >= (int) 2.412e8 &&
- fwrq->m <= (int) 2.487e8)) {
+ if ((fwrq->m >= (int)2.412e8 &&
+ fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
@@ -92,7 +92,7 @@ int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
if (ieee->current_network.channel == 0)
return -1;
/* NM 0.7.0 will not accept channel any more. */
- fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000;
+ fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel - 1] * 100000;
fwrq->e = 1;
/* fwrq->m = ieee->current_network.channel; */
/* fwrq->e = 0; */
@@ -220,7 +220,7 @@ int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
u32 target_rate = wrqu->bitrate.value;
- ieee->rate = target_rate/100000;
+ ieee->rate = target_rate / 100000;
/* FIXME: we might want to limit rate also in management protocols. */
return 0;
}
@@ -415,9 +415,9 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
if (wrqu->essid.flags && wrqu->essid.length) {
/* first flush current network.ssid */
- len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length-1) : IW_ESSID_MAX_SIZE;
- strncpy(ieee->current_network.ssid, extra, len+1);
- ieee->current_network.ssid_len = len+1;
+ len = ((wrqu->essid.length - 1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length - 1) : IW_ESSID_MAX_SIZE;
+ strncpy(ieee->current_network.ssid, extra, len + 1);
+ ieee->current_network.ssid_len = len + 1;
ieee->ssid_set = 1;
} else {
ieee->ssid_set = 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index fc6eb97801e1..f0b6b8372f91 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -214,7 +214,8 @@ int ieee80211_encrypt_fragment(
}
-void ieee80211_txb_free(struct ieee80211_txb *txb) {
+void ieee80211_txb_free(struct ieee80211_txb *txb)
+{
//int i;
if (unlikely(!txb))
return;
@@ -293,7 +294,7 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
struct tx_ts_record *pTxTs = NULL;
struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (!IsQoSDataFrame(skb->data))
return;
@@ -301,13 +302,6 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
if (is_multicast_ether_addr(hdr->addr1))
return;
//check packet and mode later
-#ifdef TO_DO_LIST
- if (pTcb->PacketLength >= 4096)
- return;
- // For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
- if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
- return;
-#endif
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
return;
}
@@ -333,8 +327,7 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
}
}
FORCED_AGG_SETTING:
- switch (pHTInfo->ForcedAMPDUMode )
- {
+ switch (pHTInfo->ForcedAMPDUMode) {
case HT_AGG_AUTO:
break;
@@ -372,7 +365,7 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
tcb_desc->bUseShortGI = false;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (pHTInfo->bForcedShortGI) {
@@ -380,9 +373,9 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
return;
}
- if ((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
+ if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if ((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
+ else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
@@ -393,16 +386,16 @@ static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
tcb_desc->bPacketBW = false;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
return;
- if ((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
+ if ((tcb_desc->data_rate & 0x80) == 0) // If using legacy rate, it shall use 20MHz channel.
return;
//BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
- if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
tcb_desc->bPacketBW = true;
return;
}
@@ -418,25 +411,21 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
tcb_desc->RTSSC = 0; // 20MHz: Don't care; 40MHz: Duplicate.
tcb_desc->bRTSBW = false; // RTS frame bandwidth is always 20MHz
- if(tcb_desc->bBroadcast || tcb_desc->bMulticast)//only unicast frame will use rts/cts
+ if (tcb_desc->bBroadcast || tcb_desc->bMulticast) //only unicast frame will use rts/cts
return;
- if (is_broadcast_ether_addr(skb->data+16)) //check addr3 as infrastructure add3 is DA.
+ if (is_broadcast_ether_addr(skb->data + 16)) //check addr3 as infrastructure add3 is DA.
return;
- if (ieee->mode < IEEE_N_24G) //b, g mode
- {
+ if (ieee->mode < IEEE_N_24G) /* b, g mode */ {
// (1) RTS_Threshold is compared to the MPDU, not MSDU.
// (2) If there are more than one frag in this MSDU, only the first frag uses protection frame.
// Other fragments are protected by previous fragment.
// So we only need to check the length of first fragment.
- if (skb->len > ieee->rts)
- {
+ if (skb->len > ieee->rts) {
tcb_desc->bRTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
- }
- else if (ieee->current_network.buseprotection)
- {
+ } else if (ieee->current_network.buseprotection) {
// Use CTS-to-SELF in protection mode.
tcb_desc->bRTSEnable = true;
tcb_desc->bCTSEnable = true;
@@ -444,43 +433,35 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
}
//otherwise return;
return;
- }
- else
- {// 11n High throughput case.
+ } else { // 11n High throughput case.
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- while (true)
- {
+ while (true) {
//check ERP protection
- if (ieee->current_network.buseprotection)
- {// CTS-to-SELF
+ if (ieee->current_network.buseprotection) {// CTS-to-SELF
tcb_desc->bRTSEnable = true;
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
break;
}
//check HT op mode
- if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
- {
+ if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
u8 HTOpMode = pHTInfo->CurrentOpMode;
- if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
- (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
- {
+ if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
+ (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
tcb_desc->bRTSEnable = true;
break;
}
}
//check rts
- if (skb->len > ieee->rts)
- {
+ if (skb->len > ieee->rts) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
tcb_desc->bRTSEnable = true;
break;
}
//to do list: check MIMO power save condition.
//check AMPDU aggregation for TXOP
- if(tcb_desc->bAMPDUEnable)
- {
+ if (tcb_desc->bAMPDUEnable) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
// According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrads
// throughput around 10M, so we disable of this mechanism. 2007.08.03 by Emily
@@ -488,8 +469,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
break;
}
//check IOT action
- if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
- {
+ if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
@@ -508,7 +488,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
tcb_desc->bUseShortPreamble = true;
if (ieee->mode == IW_MODE_MASTER)
- goto NO_PROTECTION;
+ goto NO_PROTECTION;
return;
NO_PROTECTION:
tcb_desc->bRTSEnable = false;
@@ -522,27 +502,12 @@ NO_PROTECTION:
static void ieee80211_txrate_selectmode(struct ieee80211_device *ieee,
struct cb_desc *tcb_desc)
{
-#ifdef TO_DO_LIST
- if (!IsDataFrame(pFrame)) {
- pTcb->bTxDisableRateFallBack = true;
- pTcb->bTxUseDriverAssingedRate = true;
- pTcb->RATRIndex = 7;
- return;
- }
-
- if (pMgntInfo->ForcedDataRate!= 0) {
- pTcb->bTxDisableRateFallBack = true;
- pTcb->bTxUseDriverAssingedRate = true;
- return;
- }
-#endif
if (ieee->bTxDisableRateFallBack)
tcb_desc->bTxDisableRateFallBack = true;
if (ieee->bTxUseDriverAssingedRate)
tcb_desc->bTxUseDriverAssingedRate = true;
- if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
- {
+ if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) {
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
tcb_desc->RATRIndex = 0;
}
@@ -553,11 +518,9 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
{
if (is_multicast_ether_addr(dst))
return;
- if (IsQoSDataFrame(skb->data)) //we deal qos data only
- {
+ if (IsQoSDataFrame(skb->data)) /* we deal qos data only */ {
struct tx_ts_record *pTS = NULL;
- if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true))
- {
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) {
return;
}
pTS->tx_cur_seq = (pTS->tx_cur_seq + 1) % 4096;
@@ -592,7 +555,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* If there is no driver handler to take the TXB, dont' bother
* creating it...
*/
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
+ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
printk(KERN_WARNING "%s: No xmit handler.\n",
ieee->dev->name);
@@ -631,7 +594,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Save source and destination addresses */
memcpy(&dest, skb->data, ETH_ALEN);
- memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
/* Advance the SKB to the start of the payload */
skb_pull(skb, sizeof(struct ethhdr));
@@ -646,7 +609,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
fc = IEEE80211_FTYPE_DATA;
//if(ieee->current_network.QoS_Enable)
- if(qos_actived)
+ if (qos_actived)
fc |= IEEE80211_STYPE_QOS_DATA;
else
fc |= IEEE80211_STYPE_DATA;
@@ -740,7 +703,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nr_frags; i++) {
skb_frag = txb->fragments[i];
tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
- if(qos_actived){
+ if (qos_actived) {
skb_frag->priority = skb->priority;//UP2AC(skb->priority);
tcb_desc->queue_index = UP2AC(skb->priority);
} else {
@@ -749,15 +712,13 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_reserve(skb_frag, ieee->tx_headroom);
- if (encrypt){
+ if (encrypt) {
if (ieee->hwsec_active)
tcb_desc->bHwSec = 1;
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
- }
- else
- {
+ } else {
tcb_desc->bHwSec = 0;
}
frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
@@ -775,12 +736,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = bytes_last_frag;
}
//if(ieee->current_network.QoS_Enable)
- if(qos_actived)
- {
+ if (qos_actived) {
// add 1 only indicate to corresponding seq number control 2006/7/12
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
+ frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
} else {
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
+ frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
}
/* Put a SNAP header on the first fragment */
@@ -806,17 +766,16 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
skb_put(skb_frag, 4);
}
- if(qos_actived)
- {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ if (qos_actived) {
+ if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
+ else
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
} else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
}
} else {
if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
@@ -826,7 +785,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
- if(!txb){
+ if (!txb) {
printk(KERN_WARNING "%s: Could not allocate TXB\n",
ieee->dev->name);
goto failed;
@@ -839,8 +798,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
success:
//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
- if (txb)
- {
+ if (txb) {
struct cb_desc *tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bTxEnableFwCalcDur = 1;
if (is_multicast_ether_addr(header.addr1))
@@ -862,9 +820,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&ieee->lock, flags);
dev_kfree_skb_any(skb);
if (txb) {
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
+ if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
ieee80211_softmac_xmit(txb, ieee);
- }else{
+ } else {
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += __le16_to_cpu(txb->payload_size);
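
The sequence-number handling in the hunks above packs a 12-bit sequence counter and a 4-bit fragment index into the Sequence Control field (seq << 4 | frag) and wraps the per-TID counter at 4096. A minimal userspace sketch of that packing, with illustrative names rather than the driver's own helpers:

#include <stdio.h>

/* Sequence Control field: bits 0-3 fragment number, bits 4-15 sequence number. */
static unsigned int seq_ctl(unsigned int seq, unsigned int frag)
{
	return ((seq & 0xfff) << 4) | (frag & 0xf);
}

int main(void)
{
	unsigned int seq = 4094;

	for (int i = 0; i < 4; i++) {
		printf("seq=%u frag=0 -> seq_ctl=0x%04x\n", seq, seq_ctl(seq, 0));
		seq = (seq + 1) % 4096;	/* wrap exactly like the per-TID counter above */
	}
	return 0;
}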
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index be08cd1d37a7..9dd5c04181ea 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -70,10 +70,10 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
+ for (i = 0; i < ARRAY_SIZE(ieee80211_modes); i++) {
if (network->mode & BIT(i)) {
- sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
- pname +=ieee80211_modes[i].mode_size;
+ sprintf(pname, ieee80211_modes[i].mode_string, ieee80211_modes[i].mode_size);
+ pname += ieee80211_modes[i].mode_size;
}
}
*pname = '\0';
@@ -130,8 +130,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
max_rate = rate;
}
- if (network->mode >= IEEE_N_24G)//add N rate here;
- {
+ if (network->mode >= IEEE_N_24G) /* add N rate here */ {
struct ht_capability_ele *ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
@@ -139,13 +138,13 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[4];
else
ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[0];
- is40M = (ht_cap->ChlWidth)?1:0;
- isShortGI = (ht_cap->ChlWidth)?
- ((ht_cap->ShortGI40Mhz)?1:0):
- ((ht_cap->ShortGI20Mhz)?1:0);
+ is40M = (ht_cap->ChlWidth) ? 1 : 0;
+ isShortGI = (ht_cap->ChlWidth) ?
+ ((ht_cap->ShortGI40Mhz) ? 1 : 0) :
+ ((ht_cap->ShortGI20Mhz) ? 1 : 0);
max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
- rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f];
+ rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
if (rate > max_rate)
max_rate = rate;
}
@@ -178,7 +177,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
if (ieee->wpa_enabled && network->wpa_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
@@ -219,7 +218,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
return start;
}
@@ -243,7 +242,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
list_for_each_entry(network, &ieee->network_list, list) {
i++;
- if((stop-ev)<200) {
+ if ((stop - ev) < 200) {
err = -E2BIG;
break;
}
@@ -454,7 +453,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("GET_ENCODE\n");
- if(ieee->iw_mode == IW_MODE_MONITOR)
+ if (ieee->iw_mode == IW_MODE_MONITOR)
return -1;
key = erq->flags & IW_ENCODE_INDEX;
@@ -571,7 +570,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ret = -EINVAL;
goto done;
}
- printk("alg name:%s\n",alg);
+ printk("alg name:%s\n", alg);
ops = try_then_request_module(ieee80211_get_crypto_ops(alg), module);
if (!ops) {
@@ -688,7 +687,7 @@ int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
- if (strcmp(crypt->ops->name, "WEP") == 0 )
+ if (strcmp(crypt->ops->name, "WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
else if (strcmp(crypt->ops->name, "TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
@@ -712,7 +711,7 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
@@ -765,7 +764,7 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
break;
case IW_AUTH_WPA_ENABLED:
- ieee->wpa_enabled = (data->value)?1:0;
+ ieee->wpa_enabled = (data->value) ? 1 : 0;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
@@ -785,14 +784,14 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
u8 *buf;
- if (len>MAX_WPA_IE_LEN || (len && !ie)) {
- // printk("return error out, len:%d\n", len);
- return -EINVAL;
+ if (len > MAX_WPA_IE_LEN || (len && !ie)) {
+ //printk("return error out, len:%d\n", len);
+ return -EINVAL;
}
if (len) {
- if (len != ie[1]+2) {
+ if (len != ie[1] + 2) {
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 53869b3c985c..379a2ccf4d9f 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -162,7 +162,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, s
tag += 2;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
return skb;
//return NULL;
}
@@ -229,7 +229,7 @@ static struct sk_buff *ieee80211_DELBA(
put_unaligned_le16(ReasonCode, tag);
tag += 2;
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
if (net_ratelimit())
IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA,
"<=====%s()\n", __func__);
@@ -331,9 +331,9 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
return -1;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
- req = (struct rtl_80211_hdr_3addr *) skb->data;
+ req = (struct rtl_80211_hdr_3addr *)skb->data;
tag = (u8 *)req;
dst = &req->addr2[0];
tag += sizeof(struct rtl_80211_hdr_3addr);
@@ -556,7 +556,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
return -1;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
delba = (struct rtl_80211_hdr_3addr *)skb->data;
dst = &delba->addr2[0];
pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
@@ -643,7 +643,7 @@ TsInitDelBA(struct ieee80211_device *ieee, struct ts_common_info *pTsCommonInfo,
ieee80211_send_DELBA(
ieee,
pTsCommonInfo->addr,
- (pTxTs->tx_admitted_ba_record.valid)?(&pTxTs->tx_admitted_ba_record):(&pTxTs->tx_pending_ba_record),
+ (pTxTs->tx_admitted_ba_record.valid) ? (&pTxTs->tx_admitted_ba_record) : (&pTxTs->tx_pending_ba_record),
TxRxSelect,
DELBA_REASON_END_BA);
} else if (TxRxSelect == RX_DIR) {
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index b7769bca9740..79346a00af09 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -253,10 +253,10 @@ extern u8 MCS_FILTER_1SS[16];
/* 2007/07/12 MH We only define legacy and HT wireless mode now. */
#define LEGACY_WIRELESS_MODE IEEE_MODE_MASK
-#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
- ((WirelessMode & (LEGACY_WIRELESS_MODE)) != 0) ?\
- (LegacyRate) :\
- (PICK_RATE(LegacyRate, HTRate))
+#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
+ ((WirelessMode & (LEGACY_WIRELESS_MODE)) != 0) ? \
+ (LegacyRate) : \
+ (PICK_RATE(LegacyRate, HTRate))
// MCS Bw 40 {1~7, 12~15,32}
#define RATE_ADPT_1SS_MASK 0xFF
@@ -270,11 +270,10 @@ typedef enum _HT_AGGRE_SIZE {
HT_AGG_SIZE_16K = 1,
HT_AGG_SIZE_32K = 2,
HT_AGG_SIZE_64K = 3,
-}HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
+} HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
/* Indicate different AP vendor for IOT issue */
-typedef enum _HT_IOT_PEER
-{
+typedef enum _HT_IOT_PEER {
HT_IOT_PEER_UNKNOWN = 0,
HT_IOT_PEER_REALTEK = 1,
HT_IOT_PEER_BROADCOM = 2,
@@ -282,7 +281,7 @@ typedef enum _HT_IOT_PEER
HT_IOT_PEER_ATHEROS = 4,
HT_IOT_PEER_CISCO = 5,
HT_IOT_PEER_MAX = 6
-}HT_IOT_PEER_E, *PHTIOT_PEER_E;
+} HT_IOT_PEER_E, *PHTIOT_PEER_E;
/*
* IOT Action for different AP
@@ -298,6 +297,6 @@ typedef enum _HT_IOT_ACTION {
HT_IOT_ACT_CDD_FSYNC = 0x00000080,
HT_IOT_ACT_PURE_N_MODE = 0x00000100,
HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
-}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
+} HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
#endif //_RTL819XU_HTTYPE_H_
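
The CURRENT_RATE macro above expands to a bare conditional expression with no outer parentheses, so its result can bind unexpectedly against operators at the call site. A small, self-contained illustration of that pitfall using made-up macro names:

#include <stdio.h>

/* Illustrative macros, not the ones from rtl819x_HT.h. */
#define PICK_UNSAFE(cond, a, b)	(cond) ? (a) : (b)
#define PICK_SAFE(cond, a, b)	((cond) ? (a) : (b))

int main(void)
{
	/* '+' binds tighter than '?:', so the unparenthesized expansion
	 * becomes (2 + 0) ? 10 : 20 and yields 10 instead of 2 + 20.
	 */
	printf("unsafe: %d\n", 2 + PICK_UNSAFE(0, 10, 20));	/* prints 10 */
	printf("safe:   %d\n", 2 + PICK_SAFE(0, 10, 20));	/* prints 22 */
	return 0;
}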
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index c73a8058cf87..dba3f2db9f48 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -93,10 +93,6 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
-#ifdef TO_DO_LIST
- // 8190 only. Assign duration operation mode to firmware
- pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
-#endif
/*
* 8190 only, Realtek proprietary aggregation mode
* Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 59d179ae7ad2..5cee1031a27c 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -105,7 +105,7 @@ static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
eth_zero_addr(pTsCommonInfo->addr);
memset(&pTsCommonInfo->t_spec, 0, sizeof(struct tspec_body));
- memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas)*TCLAS_NUM);
+ memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas) * TCLAS_NUM);
pTsCommonInfo->t_clas_proc = 0;
pTsCommonInfo->t_clas_num = 0;
}
@@ -180,14 +180,12 @@ void TSInitialize(struct ieee80211_device *ieee)
}
// Initialize unused Rx Reorder List.
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
-//#ifdef TO_DO_LIST
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
- if (count == (REORDER_ENTRY_NUM-1))
+ if (count == (REORDER_ENTRY_NUM - 1))
break;
- pRxReorderEntry = &ieee->RxReorderEntry[count+1];
+ pRxReorderEntry = &ieee->RxReorderEntry[count + 1];
}
-//#endif
}
static void AdmitTS(struct ieee80211_device *ieee,
@@ -259,7 +257,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct ieee80211_device *ieee,
}
if (&pRet->list != psearch_list)
- return pRet ;
+ return pRet;
else
return NULL;
}
@@ -367,8 +365,8 @@ bool GetTs(
(&ieee->Rx_TS_Admit_List);
enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER) ?
- ((TxRxSelect == TX_DIR)?DIR_DOWN:DIR_UP) :
- ((TxRxSelect == TX_DIR)?DIR_UP:DIR_DOWN);
+ ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
+ ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next, struct ts_common_info, list);
@@ -417,7 +415,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
TsInitDelBA(ieee, pTs, TxRxSelect);
if (TxRxSelect == RX_DIR) {
-//#ifdef TO_DO_LIST
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
if (timer_pending(&pRxTS->rx_pkt_pending_timer))
@@ -445,7 +442,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
-//#endif
} else {
struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
del_timer_sync(&pTxTS->ts_add_ba_timer);
@@ -530,7 +526,7 @@ void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTx
jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
} else {
IEEE80211_DEBUG(IEEE80211_DL_BA, "%s: Immediately Start ADDBA now!!\n", __func__);
- mod_timer(&pTxTS->ts_add_ba_timer, jiffies+10); //set 10 ticks
+ mod_timer(&pTxTS->ts_add_ba_timer, jiffies + 10); //set 10 ticks
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __func__);
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index de83daa0c9ed..2527cea60e3e 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -39,7 +39,6 @@ static void eprom_cs(struct net_device *dev, short bit)
udelay(EPROM_DELAY);
}
-
static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
@@ -58,7 +57,6 @@ static void eprom_ck_cycle(struct net_device *dev)
udelay(EPROM_DELAY);
}
-
static void eprom_w(struct net_device *dev, short bit)
{
u8 cmdreg;
@@ -76,7 +74,6 @@ static void eprom_w(struct net_device *dev, short bit)
udelay(EPROM_DELAY);
}
-
static short eprom_r(struct net_device *dev)
{
u8 bit;
@@ -94,7 +91,6 @@ static short eprom_r(struct net_device *dev)
return 0;
}
-
static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
int i;
@@ -105,7 +101,6 @@ static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
}
}
-
int eprom_read(struct net_device *dev, u32 addr)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -119,7 +114,7 @@ int eprom_read(struct net_device *dev, u32 addr)
ret = 0;
/* enable EPROM programming */
write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -162,7 +157,7 @@ int eprom_read(struct net_device *dev, u32 addr)
if (err < 0)
return err;
- ret |= err<<(15-i);
+ ret |= err << (15 - i);
}
eprom_cs(dev, 0);
@@ -170,6 +165,6 @@ int eprom_read(struct net_device *dev, u32 addr)
/* disable EPROM programming */
write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT));
return ret;
}
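
eprom_read above clocks the 93Cx6 EEPROM one bit at a time and folds each bit into the 16-bit result with ret |= err << (15 - i), so the first bit read lands in the most significant position. A standalone sketch of that MSB-first assembly, using a stub bit source in place of the real eprom_r():

#include <stdio.h>

/* Stub bit source standing in for the clocked eprom_r() reads:
 * returns bit i of a fixed pattern, most significant bit first.
 */
static int read_bit(int i)
{
	const unsigned int pattern = 0x8192;

	return (pattern >> (15 - i)) & 1;
}

int main(void)
{
	unsigned int word = 0;

	for (int i = 0; i < 16; i++)
		word |= (unsigned int)read_bit(i) << (15 - i);	/* first bit -> bit 15 */

	printf("assembled word: 0x%04x\n", word);	/* 0x8192 */
	return 0;
}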
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 0bedf88525cd..b169460b9f26 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -42,9 +42,9 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
- if (priv->card_8192_version == VERSION_819XU_A
- || priv->card_8192_version
- == VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
+ if (priv->card_8192_version == VERSION_819XU_A ||
+ priv->card_8192_version ==
+ VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
(enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
@@ -79,10 +79,10 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
default:
RT_TRACE(COMP_ERR, "phy_set_rf8256_bandwidth(): unknown Bandwidth: %#X\n", Bandwidth);
break;
-
}
}
}
+
/*--------------------------------------------------------------------------
* Overview: Interface to config 8256
* Input: struct net_device* dev
@@ -101,6 +101,7 @@ void phy_rf8256_config(struct net_device *dev)
/* Config BB and RF */
phy_rf8256_config_para_file(dev);
}
+
/*--------------------------------------------------------------------------
* Overview: Interface to config 8256
* Input: struct net_device* dev
@@ -137,12 +138,12 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
break;
case RF90_PATH_B:
case RF90_PATH_D:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16);
break;
}
/*----Set RF_ENV enable----*/
- rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
/*----Set RF_ENV output high----*/
rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
@@ -151,7 +152,7 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 */
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? */
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e) eRFPath, 0x0, bMask12Bits, 0xbf);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0, bMask12Bits, 0xbf);
/* Check RF block (for FPGA platform only)----
* TODO: this function should be removed on ASIC , Emily 2007.2.2
@@ -207,7 +208,7 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
break;
case RF90_PATH_B:
case RF90_PATH_D:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
break;
}
@@ -215,7 +216,6 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
RT_TRACE(COMP_ERR, "phy_rf8256_config_para_file():Radio[%d] Fail!!", eRFPath);
goto phy_RF8256_Config_ParaFile_Fail;
}
-
}
RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
@@ -225,11 +225,11 @@ phy_RF8256_Config_ParaFile_Fail:
RT_TRACE(COMP_ERR, "PHY Initialization failed\n");
}
-
void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel)
{
u32 TxAGC = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
+
TxAGC = powerlevel;
if (priv->bDynamicTxLowPower) {
@@ -244,7 +244,6 @@ void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel)
rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
}
-
void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -255,16 +254,16 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
u8 byte0, byte1, byte2, byte3;
powerBase0 = powerlevel + priv->TxPowerDiff; /* OFDM rates */
- powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0;
powerBase1 = powerlevel; /* MCS rates */
- powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
+ powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
for (index = 0; index < 6; index++) {
- writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index < 2)?powerBase0:powerBase1);
+ writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index < 2) ? powerBase0 : powerBase1);
byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00)>>8);
- byte2 = (u8)((writeVal & 0x7f0000)>>16);
- byte3 = (u8)((writeVal & 0x7f000000)>>24);
+ byte1 = (u8)((writeVal & 0x7f00) >> 8);
+ byte2 = (u8)((writeVal & 0x7f0000) >> 16);
+ byte3 = (u8)((writeVal & 0x7f000000) >> 24);
if (byte0 > 0x24)
/* Max power index = 0x24 */
@@ -278,7 +277,7 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
/* for tx power track */
if (index == 3) {
- writeVal_tmp = (byte3<<24) | (byte2<<16) | (byte1<<8) | byte0;
+ writeVal_tmp = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
priv->Pwr_Track = writeVal_tmp;
}
@@ -288,10 +287,9 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
*/
writeVal = 0x03030303;
} else {
- writeVal = (byte3<<24) | (byte2<<16) | (byte1<<8) | byte0;
+ writeVal = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
}
rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
}
return;
-
}
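
phy_set_rf8256_ofdm_tx_power above builds its register values by replicating one power level into every byte lane of a u32 and later splits the per-rate word back apart with shift-and-mask, clamping each 7-bit index to 0x24. A minimal sketch of those two steps with illustrative numbers:

#include <stdio.h>

/* Copy one byte value into all four byte lanes of a 32-bit word. */
static unsigned int replicate_byte(unsigned int b)
{
	b &= 0xff;
	return (b << 24) | (b << 16) | (b << 8) | b;
}

int main(void)
{
	/* A base level in every lane plus a per-lane offset, purely illustrative. */
	unsigned int val = replicate_byte(0x1c) + 0x02040608;

	for (int lane = 0; lane < 4; lane++) {
		unsigned int idx = (val >> (8 * lane)) & 0x7f;	/* 7-bit power index */

		if (idx > 0x24)
			idx = 0x24;	/* clamp to the maximum index, as above */
		printf("lane %d: 0x%02x\n", lane, idx);
	}
	return 0;
}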
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 569d02240bf5..2821411878ce 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2076,14 +2076,6 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
wireless_mode = WIRELESS_MODE_B;
}
}
-#ifdef TO_DO_LIST
- /* TODO: this function doesn't work well at this time,
- * we should wait for FPGA
- */
- ActUpdateChannelAccessSetting(
- pAdapter, pHalData->CurrentWirelessMode,
- &pAdapter->MgntInfo.Info8185.ChannelAccessSetting);
-#endif
priv->ieee80211->mode = wireless_mode;
if (wireless_mode == WIRELESS_MODE_N_24G ||
@@ -2159,12 +2151,6 @@ static int rtl8192_init_priv_variable(struct net_device *dev)
priv->ieee80211->InitialGainHandler = InitialGain819xUsb;
priv->card_type = USB;
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest) {
- pHalData->ShortRetryLimit = 7;
- pHalData->LongRetryLimit = 7;
- }
-#endif
priv->ShortRetryLimit = 0x30;
priv->LongRetryLimit = 0x30;
priv->EarlyRxThreshold = 7;
@@ -2180,34 +2166,6 @@ static int rtl8192_init_priv_variable(struct net_device *dev)
* TRUE: SW provides them
*/
(false ? TCR_SAT : 0);
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest)
- pHalData->ReceiveConfig =
- pHalData->CSMethod |
- /* accept management/data */
- RCR_AMF | RCR_ADF |
- /* accept control frame for SW
- * AP needs PS-poll
- */
- RCR_ACF |
- /* accept BC/MC/UC */
- RCR_AB | RCR_AM | RCR_APM |
- /* accept ICV/CRC error
- * packet
- */
- RCR_AICV | RCR_ACRC32 |
- /* Max DMA Burst Size per Tx
- * DMA Burst, 7: unlimited.
- */
- ((u32)7 << RCR_MXDMA_OFFSET) |
- /* Rx FIFO Threshold,
- * 7: No Rx threshold.
- */
- (pHalData->EarlyRxThreshold << RCR_FIFO_OFFSET) |
- (pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt : 0);
- else
-
-#endif
priv->ReceiveConfig =
/* accept management/data */
RCR_AMF | RCR_ADF |
@@ -2665,19 +2623,10 @@ static void rtl8192_hwconfig(struct net_device *dev)
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_AUTO:
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest) {
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- } else
-#endif
- {
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- }
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_24G:
/* It support CCK rate by default. CCK rate will be filtered
@@ -2848,48 +2797,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
}
RT_TRACE(COMP_INIT, "%s():after firmware download\n", __func__);
-#ifdef TO_DO_LIST
- if (Adapter->ResetProgress == RESET_TYPE_NORESET) {
- if (pMgntInfo->RegRfOff) { /* User disable RF via registry. */
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
- MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
- /* Those actions will be discard in MgntActSet_RF_State
- * because of the same state
- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
- PHY_SetRFReg(Adapter,
- (enum rf90_radio_path_e)eRFPath,
- 0x4, 0xC00, 0x0);
- } else if (pMgntInfo->RfOffReason > RF_CHANGE_BY_PS) {
- /* H/W or S/W RF OFF before sleep. */
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): Turn off RF for RfOffReason(%d) ----------\n",
- pMgntInfo->RfOffReason));
- MgntActSet_RF_State(Adapter,
- eRfOff,
- pMgntInfo->RfOffReason);
- } else {
- pHalData->eRFPowerState = eRfOn;
- pMgntInfo->RfOffReason = 0;
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): RF is on ----------\n"));
- }
- } else {
- if (pHalData->eRFPowerState == eRfOff) {
- MgntActSet_RF_State(Adapter,
- eRfOff,
- pMgntInfo->RfOffReason);
- /* Those actions will be discard in MgntActSet_RF_State
- * because of the same state
- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
- PHY_SetRFReg(Adapter,
- (enum rf90_radio_path_e)eRFPath,
- 0x4, 0xC00, 0x0);
- }
- }
-#endif
/* config RF. */
if (priv->ResetProgress == RESET_TYPE_NORESET) {
rtl8192_phy_RFConfig(dev);
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 153d4ee0ec07..dd81d210bd49 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -231,7 +231,7 @@ bool init_firmware(struct net_device *dev)
rst_opt = OPT_FIRMWARE_RESET;
starting_state = FW_INIT_STEP2_DATA;
} else {
- RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n");
+ RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n");
}
/*
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 5f04afe53d69..c04d8eca0cfb 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -516,16 +516,6 @@ static void rtl8192_phyConfigBB(struct net_device *dev,
{
u32 i;
-#ifdef TO_DO_LIST
- u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL;
-
- if (Adapter->bInHctTest) {
- PHY_REGArrayLen = PHY_REGArrayLengthDTM;
- AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
- Rtl8190PHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
- Rtl8190AGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
- }
-#endif
if (ConfigType == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < PHY_REG_1T2RArrayLength; i += 2) {
rtl8192_setBBreg(dev, Rtl8192UsbPHY_REG_1T2RArray[i],
@@ -1059,10 +1049,6 @@ static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
switch (priv->rf_chip) {
case RF_8225:
-#ifdef TO_DO_LIST
- PHY_SetRF8225CckTxPower(Adapter, powerlevel);
- PHY_SetRF8225OfdmTxPower(Adapter, powerlevelOFDM24G);
-#endif
break;
case RF_8256:
@@ -1160,48 +1146,6 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
RT_TRACE(COMP_ERR, "Not support rf_chip(%x)\n", priv->rf_chip);
break;
}
-#ifdef TO_DO_LIST
- if (bResult) {
- /* Update current RF state variable. */
- pHalData->eRFPowerState = eRFPowerState;
- switch (pHalData->RFChipID) {
- case RF_8256:
- switch (pHalData->eRFPowerState) {
- case eRfOff:
- /* If Rf off reason is from IPS,
- * LED should blink with no link
- */
- if (pMgntInfo->RfOffReason == RF_CHANGE_BY_IPS)
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
- else
- /* Turn off LED if RF is not ON. */
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_POWER_OFF);
- break;
-
- case eRfOn:
- /* Turn on RF we are still linked, which might
- * happen when we quickly turn off and on HW RF.
- */
- if (pMgntInfo->bMediaConnect)
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_LINK);
- else
- /* Turn off LED if RF is not ON. */
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
- break;
-
- default:
- break;
- }
- break;
-
- default:
- RT_TRACE(COMP_RF, DBG_LOUD, "%s(): Unknown RF type\n",
- __func__);
- break;
- }
-
- }
-#endif
priv->SetRFPowerStateInProgress = false;
return bResult;
@@ -1628,9 +1572,6 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
/* <3> Set RF related register */
switch (priv->rf_chip) {
case RF_8225:
-#ifdef TO_DO_LIST
- PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW);
-#endif
break;
case RF_8256:
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 28941423b7ed..c20dd5a6bbd1 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -11,8 +11,8 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
-#ifndef _IO_H_
-#define _IO_H_
+#ifndef _RTL871X_IO_H_
+#define _RTL871X_IO_H_
#include "osdep_service.h"
#include "osdep_intf.h"
@@ -234,5 +234,4 @@ void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
uint r8712_alloc_io_queue(struct _adapter *adapter);
void r8712_free_io_queue(struct _adapter *adapter);
-#endif /*_RTL8711_IO_H_*/
-
+#endif /*_RTL871X_IO_H_*/
diff --git a/drivers/staging/rtl8712/rtl871x_rf.h b/drivers/staging/rtl8712/rtl871x_rf.h
index cc54453cd424..7d98921a48fa 100644
--- a/drivers/staging/rtl8712/rtl871x_rf.h
+++ b/drivers/staging/rtl8712/rtl871x_rf.h
@@ -52,5 +52,4 @@ enum {
RTL8712_RFC_2T2R = 0x22
};
-#endif /*_RTL8711_RF_H_*/
-
+#endif /*__RTL871X_RF_H_*/
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 1a5b966a167e..be731f1a2209 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -300,7 +300,6 @@ static inline unsigned char *get_da(unsigned char *pframe)
return da;
}
-
static inline unsigned char *get_sa(unsigned char *pframe)
{
unsigned char *sa;
@@ -346,8 +345,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
return sa;
}
-
-
/*-----------------------------------------------------------------------------
* Below is for the security related definition
*-----------------------------------------------------------------------------
@@ -392,7 +389,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _RESERVED47_ 47
-
/* ---------------------------------------------------------------------------
* Below is the fixed elements...
* ---------------------------------------------------------------------------
@@ -436,7 +432,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _WMM_IE_Length_ 7 /* for WMM STA */
#define _WMM_Para_Element_Length_ 24
-
/*-----------------------------------------------------------------------------
* Below is the definition for 802.11n
*------------------------------------------------------------------------------
@@ -456,7 +451,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define GetOrderBit(pbuf) (((*(__le16 *)(pbuf)) & \
le16_to_cpu(_ORDER_)) != 0)
-
/**
* struct ieee80211_bar - HT Block Ack Request
*
@@ -476,7 +470,6 @@ struct ieee80211_bar {
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-
/*
* struct ieee80211_ht_cap - HT capabilities
*
@@ -552,7 +545,6 @@ struct ieee80211_ht_addt_info {
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-
/* Spatial Multiplexing Power Save Modes */
#define WLAN_HT_CAP_SM_PS_STATIC 0
#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 02f5478845b4..6d18d23acdc0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -13,7 +13,6 @@ extern unsigned char RTW_WPA_OUI[];
extern unsigned char WMM_OUI[];
extern unsigned char WPS_OUI[];
extern unsigned char P2P_OUI[];
-extern unsigned char WFD_OUI[];
void init_mlme_ap_info(struct adapter *padapter)
{
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 62b42e29e4fd..4075de07e0a9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -821,12 +821,12 @@ static void pwr_rpwm_timeout_handler(struct timer_list *t)
_set_workitem(&pwrpriv->rpwmtimeoutwi);
}
-static __inline void register_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+static inline void register_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
{
pwrctrl->alives |= tag;
}
-static __inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+static inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
{
pwrctrl->alives &= ~tag;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index d78fbbc98fa2..ea3ea2a6b314 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -451,7 +451,7 @@ void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigne
mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
}
-__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
+inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
{
return pnetwork->MacAddress;
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 19486f0e0ead..6e4a1fcb8790 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -482,10 +482,8 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
*pU4Tmp = BTC_WIFI_BW_LEGACY;
else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_20)
*pU4Tmp = BTC_WIFI_BW_HT20;
- else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40)
- *pU4Tmp = BTC_WIFI_BW_HT40;
else
- *pU4Tmp = BTC_WIFI_BW_HT40; /* todo */
+ *pU4Tmp = BTC_WIFI_BW_HT40;
break;
case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 8c1ade44ed81..8d7fce1e39b7 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -478,7 +478,7 @@ struct sdio_data intf_data;
#define dvobj_to_pwrctl(dvobj) (&(dvobj->pwrctl_priv))
#define pwrctl_to_dvobj(pwrctl) container_of(pwrctl, struct dvobj_priv, pwrctl_priv)
-__inline static struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
{
/* todo: get interface type from dvobj and the return the dev accordingly */
#ifdef RTW_DVOBJ_CHIP_HW_TYPE
@@ -634,14 +634,14 @@ struct adapter {
/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */
/* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */
-__inline static void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
+static inline void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
{
int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
df |= func_bit;
atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
}
-__inline static void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
+static inline void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
{
int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
df &= ~(func_bit);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index d2616af95ffa..81a9c19ecc6a 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -110,12 +110,12 @@ int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
extern void _rtw_init_queue(struct __queue *pqueue);
-static __inline void thread_enter(char *name)
+static inline void thread_enter(char *name)
{
allow_signal(SIGTERM);
}
-__inline static void flush_signals_thread(void)
+static inline void flush_signals_thread(void)
{
if (signal_pending (current))
{
@@ -125,7 +125,7 @@ __inline static void flush_signals_thread(void)
#define rtw_warn_on(condition) WARN_ON(condition)
-__inline static int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *parg4)
+static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *parg4)
{
int ret = true;
@@ -136,7 +136,7 @@ __inline static int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *p
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
-__inline static u32 _RND4(u32 sz)
+static inline u32 _RND4(u32 sz)
{
u32 val;
@@ -147,7 +147,7 @@ __inline static u32 _RND4(u32 sz)
}
-__inline static u32 _RND8(u32 sz)
+static inline u32 _RND8(u32 sz)
{
u32 val;
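
The _RND helpers shown above round a size up to the next multiple of an alignment: the generic form adds (r - 1) before dividing, while _RND4/_RND8 cover the power-of-two cases. A short userspace sketch of the idiom (names are illustrative, not the header's):

#include <stdio.h>

/* Generic round-up: works for any positive step. */
static unsigned int round_up_to(unsigned int sz, unsigned int step)
{
	return ((sz + step - 1) / step) * step;
}

/* Power-of-two variant: add step - 1, then mask off the low bits. */
static unsigned int round_up_pow2(unsigned int sz, unsigned int step)
{
	return (sz + step - 1) & ~(step - 1);
}

int main(void)
{
	printf("%u %u\n", round_up_to(13, 4), round_up_pow2(13, 4));	/* 16 16 */
	printf("%u %u\n", round_up_to(16, 8), round_up_pow2(16, 8));	/* 16 16 */
	return 0;
}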
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 2f1b51e614fb..c582ede1ac12 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -64,12 +64,12 @@
typedef struct work_struct _workitem;
-__inline static struct list_head *get_next(struct list_head *list)
+static inline struct list_head *get_next(struct list_head *list)
{
return list->next;
}
-__inline static struct list_head *get_list_head(struct __queue *queue)
+static inline struct list_head *get_list_head(struct __queue *queue)
{
return (&(queue->queue));
}
@@ -78,28 +78,28 @@ __inline static struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
container_of(ptr, type, member)
-__inline static void _set_timer(_timer *ptimer, u32 delay_time)
+static inline void _set_timer(_timer *ptimer, u32 delay_time)
{
mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
}
-__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
+static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
del_timer_sync(ptimer);
*bcancelled = true;/* true == 1; false == 0 */
}
-__inline static void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
+static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
INIT_WORK(pwork, pfunc);
}
-__inline static void _set_workitem(_workitem *pwork)
+static inline void _set_workitem(_workitem *pwork)
{
schedule_work(pwork);
}
-__inline static void _cancel_workitem_sync(_workitem *pwork)
+static inline void _cancel_workitem_sync(_workitem *pwork)
{
cancel_work_sync(pwork);
}
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index 2223e1f139f2..362737b83c3a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -496,13 +496,13 @@ extern sint rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
extern sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, sint keyid, u8 set_tx, bool enqueue);
extern sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
-__inline static u8 *get_bssid(struct mlme_priv *pmlmepriv)
+static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress => bssid */
/* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress => ibss mac address */
return pmlmepriv->cur_network.network.MacAddress;
}
-__inline static sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
if (pmlmepriv->fw_state & state)
return true;
@@ -510,7 +510,7 @@ __inline static sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
return false;
}
-__inline static sint get_fwstate(struct mlme_priv *pmlmepriv)
+static inline sint get_fwstate(struct mlme_priv *pmlmepriv)
{
return pmlmepriv->fw_state;
}
@@ -522,7 +522,7 @@ __inline static sint get_fwstate(struct mlme_priv *pmlmepriv)
* ### NOTE:#### (!!!!)
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
*/
-__inline static void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
@@ -531,7 +531,7 @@ __inline static void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
}
}
-__inline static void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
+static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
@@ -544,7 +544,7 @@ __inline static void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
* No Limit on the calling context,
* therefore set it to be the critical section...
*/
-__inline static void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, state) == true)
@@ -552,7 +552,7 @@ __inline static void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
spin_unlock_bh(&pmlmepriv->lock);
}
-__inline static void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val)
+static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val)
{
spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned = val;
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 5de946e66302..012d8f54814f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -405,7 +405,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
-__inline static u8 *get_rxmem(union recv_frame *precvframe)
+static inline u8 *get_rxmem(union recv_frame *precvframe)
{
/* always return rx_head... */
if (precvframe == NULL)
@@ -414,7 +414,7 @@ __inline static u8 *get_rxmem(union recv_frame *precvframe)
return precvframe->u.hdr.rx_head;
}
-__inline static u8 *get_recvframe_data(union recv_frame *precvframe)
+static inline u8 *get_recvframe_data(union recv_frame *precvframe)
{
/* always return rx_data */
@@ -425,7 +425,7 @@ __inline static u8 *get_recvframe_data(union recv_frame *precvframe)
}
-__inline static u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
{
/* rx_data += sz; move rx_data sz bytes hereafter */
@@ -450,7 +450,7 @@ __inline static u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
}
-__inline static u8 *recvframe_put(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
{
/* rx_tai += sz; move rx_tail sz bytes hereafter */
@@ -479,7 +479,7 @@ __inline static u8 *recvframe_put(union recv_frame *precvframe, sint sz)
-__inline static u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
{
/* rmv data from rx_tail (by yitsen) */
@@ -503,7 +503,7 @@ __inline static u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
}
-__inline static union recv_frame *rxmem_to_recvframe(u8 *rxmem)
+static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
{
/* due to the design of 2048 bytes alignment of recv_frame, we can reference the union recv_frame */
/* from any given member of recv_frame. */
@@ -513,13 +513,13 @@ __inline static union recv_frame *rxmem_to_recvframe(u8 *rxmem)
}
-__inline static sint get_recvframe_len(union recv_frame *precvframe)
+static inline sint get_recvframe_len(union recv_frame *precvframe)
{
return precvframe->u.hdr.len;
}
-__inline static s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
+static inline s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
{
s32 SignalPower; /* in dBm. */
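
recvframe_pull and recvframe_put above adjust a receive frame's data/tail pointers much like skb_pull and skb_put adjust an sk_buff: pull trims bytes from the front of the payload, put extends it at the back, each with a bounds check. A simplified userspace sketch of that bookkeeping using a made-up struct rather than the driver's union recv_frame:

#include <stddef.h>
#include <stdio.h>

struct frame {
	unsigned char buf[64];
	unsigned char *data;	/* start of the payload */
	unsigned char *tail;	/* one past the last payload byte */
	unsigned char *end;	/* end of the underlying buffer */
	size_t len;
};

/* Drop sz bytes from the front; NULL if the payload is too short. */
static unsigned char *pull(struct frame *f, size_t sz)
{
	if (sz > f->len)
		return NULL;
	f->data += sz;
	f->len -= sz;
	return f->data;
}

/* Grow the payload by sz bytes at the back; NULL if it would overrun. */
static unsigned char *put(struct frame *f, size_t sz)
{
	if (f->tail + sz > f->end)
		return NULL;
	f->tail += sz;
	f->len += sz;
	return f->tail;
}

int main(void)
{
	struct frame f;

	f.data = f.buf;
	f.tail = f.buf;
	f.end = f.buf + sizeof(f.buf);
	f.len = 0;

	put(&f, 32);	/* e.g. a received MPDU of 32 bytes */
	pull(&f, 24);	/* strip a 24-byte 802.11 header */
	printf("payload length: %zu\n", f.len);	/* 8 */
	return 0;
}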
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index b9df42d0677e..3acce5630f8e 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -348,7 +348,7 @@ struct sta_priv {
};
-__inline static u32 wifi_mac_hash(u8 *mac)
+static inline u32 wifi_mac_hash(u8 *mac)
{
u32 x;
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 8c50bbb20f3b..2faf83704ff0 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -347,7 +347,7 @@ enum WIFI_REG_DOMAIN {
(addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
)
-__inline static int IS_MCAST(unsigned char *da)
+static inline int IS_MCAST(unsigned char *da)
{
if ((*da) & 0x01)
return true;
@@ -355,20 +355,20 @@ __inline static int IS_MCAST(unsigned char *da)
return false;
}
-__inline static unsigned char * get_ra(unsigned char *pframe)
+static inline unsigned char * get_ra(unsigned char *pframe)
{
unsigned char *ra;
ra = GetAddr1Ptr(pframe);
return ra;
}
-__inline static unsigned char * get_ta(unsigned char *pframe)
+static inline unsigned char * get_ta(unsigned char *pframe)
{
unsigned char *ta;
ta = GetAddr2Ptr(pframe);
return ta;
}
-__inline static unsigned char * get_da(unsigned char *pframe)
+static inline unsigned char * get_da(unsigned char *pframe)
{
unsigned char *da;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -392,7 +392,7 @@ __inline static unsigned char * get_da(unsigned char *pframe)
}
-__inline static unsigned char * get_sa(unsigned char *pframe)
+static inline unsigned char * get_sa(unsigned char *pframe)
{
unsigned char *sa;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -415,7 +415,7 @@ __inline static unsigned char * get_sa(unsigned char *pframe)
return sa;
}
-__inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
+static inline unsigned char * get_hdr_bssid(unsigned char *pframe)
{
unsigned char *sa = NULL;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -439,7 +439,7 @@ __inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
}
-__inline static int IsFrameTypeCtrl(unsigned char *pframe)
+static inline int IsFrameTypeCtrl(unsigned char *pframe)
{
if (WIFI_CTRL_TYPE == GetFrameType(pframe))
return true;
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
index 88890b1c3c4c..723fc5b546ef 100644
--- a/drivers/staging/rtl8723bs/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -223,7 +223,7 @@ struct wlan_bssid_ex {
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability information) */
} __packed;
-__inline static uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
{
return (sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength);
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 57876463b9b2..f819abb756dc 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -19,8 +19,6 @@
#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 5000 /* ms */
#define RTW_MAX_NUM_PMKIDS 4
-#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
-
static const u32 rtw_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 90c2997256b7..d1b199e3e5bd 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -21,13 +21,10 @@
#define RATE_COUNT 4
/* combo scan */
-#define WEXT_CSCAN_AMOUNT 9
-#define WEXT_CSCAN_BUF_LEN 360
#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
#define WEXT_CSCAN_HEADER_SIZE 12
#define WEXT_CSCAN_SSID_SECTION 'S'
#define WEXT_CSCAN_CHANNEL_SECTION 'C'
-#define WEXT_CSCAN_NPROBE_SECTION 'N'
#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
@@ -215,8 +212,6 @@ static char *translate_scan(struct adapter *padapter,
} else if (ht_cap) {
if (mcs_rate&0x8000) { /* MCS15 */
max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
- } else if (mcs_rate&0x0080) { /* MCS7 */
- max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
} else { /* default MCS7 */
/* DBG_871X("wx_get_scan, mcs_rate_bitmap = 0x%x\n", mcs_rate); */
max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index aa2f62acc994..578b9f734231 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -33,11 +33,6 @@
REG_RULE(2467 - 10, 2472 + 10, 40, 0, 20, \
NL80211_RRF_PASSIVE_SCAN)
-/* 2G chan 14, PASSIVS SCAN, NO OFDM (B only) */
-#define RTW_2GHZ_CH14 \
- REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, \
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
-
static const struct ieee80211_regdomain rtw_regdom_rd = {
.n_reg_rules = 3,
.alpha2 = "99",
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 1128eec3bd08..e853fa9cc950 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -3842,7 +3842,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- int retval = STATUS_FAIL;
+ int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
u8 *buf = NULL;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 8277d7895608..561851cc8780 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -393,10 +393,9 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
*offset = 0;
*index = *index + 1;
}
- if ((i == (sg_cnt - 1)) || !resid)
- option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
- else
- option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ if ((i == sg_cnt - 1) || !resid)
+ option |= RTSX_SG_END;
rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
@@ -541,10 +540,9 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
(unsigned int)addr, len);
+ option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
if (j == (sg_cnt - 1))
- option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
- else
- option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ option |= RTSX_SG_END;
rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
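
The rtsx_transport.c hunks above replace two nearly identical if/else assignments with code that sets the common flag bits first and ORs in RTSX_SG_END only for the final scatter-gather entry. A tiny sketch of the pattern with placeholder flag values:

#include <stdio.h>

/* Placeholder flag bits, not the rts5208 register values. */
#define SG_VALID	0x1
#define SG_TRANS_DATA	0x2
#define SG_END		0x4

int main(void)
{
	int sg_cnt = 3;

	for (int i = 0; i < sg_cnt; i++) {
		unsigned int option = SG_VALID | SG_TRANS_DATA;	/* common bits */

		if (i == sg_cnt - 1)
			option |= SG_END;	/* only the last entry closes the chain */
		printf("entry %d: option=0x%x\n", i, option);
	}
	return 0;
}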
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index a06045344301..25c31496757e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -2573,17 +2573,13 @@ SD_UNLOCK_ENTRY:
retval = sd_sdr_tuning(chip);
if (retval != STATUS_SUCCESS) {
- if (sd20_mode) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
goto status_fail;
- } else {
- retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS)
- goto status_fail;
- try_sdio = false;
- sd20_mode = true;
- goto switch_fail;
- }
+ try_sdio = false;
+ sd20_mode = true;
+ goto switch_fail;
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
@@ -2598,17 +2594,13 @@ SD_UNLOCK_ENTRY:
if (read_lba0) {
retval = sd_read_lba0(chip);
if (retval != STATUS_SUCCESS) {
- if (sd20_mode) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
goto status_fail;
- } else {
- retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS)
- goto status_fail;
- try_sdio = false;
- sd20_mode = true;
- goto switch_fail;
- }
+ try_sdio = false;
+ sd20_mode = true;
+ goto switch_fail;
}
}
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index b5abfe89190c..df8812c30640 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -454,9 +454,11 @@ err_clk:
imx_disable_unprepare_clks(dev);
disable_hsic_regulator:
if (data->hsic_pad_regulator)
- ret = regulator_disable(data->hsic_pad_regulator);
+ /* don't overwrite original ret (cf. EPROBE_DEFER) */
+ regulator_disable(data->hsic_pad_regulator);
if (pdata.flags & CI_HDRC_PMQOS)
pm_qos_remove_request(&data->pm_qos_req);
+ data->ci_pdev = NULL;
return ret;
}
@@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
- ci_hdrc_remove_device(data->ci_pdev);
+ if (data->ci_pdev)
+ ci_hdrc_remove_device(data->ci_pdev);
if (data->override_phy_control)
usb_phy_shutdown(data->phy);
- imx_disable_unprepare_clks(&pdev->dev);
- if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
- if (data->hsic_pad_regulator)
- regulator_disable(data->hsic_pad_regulator);
+ if (data->ci_pdev) {
+ imx_disable_unprepare_clks(&pdev->dev);
+ if (data->plat_data->flags & CI_HDRC_PMQOS)
+ pm_qos_remove_request(&data->pm_qos_req);
+ if (data->hsic_pad_regulator)
+ regulator_disable(data->hsic_pad_regulator);
+ }
return 0;
}
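
The ci_hdrc_imx error path above deliberately avoids assigning the cleanup call's return value to ret, so an -EPROBE_DEFER from probe still reaches the caller, and remove() later checks ci_pdev before undoing work that never happened. A minimal userspace sketch of the first idea, with hypothetical function names:

#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel-internal errno value, defined here only for this sketch */

/* Hypothetical stand-ins for the real setup and cleanup calls. */
static int add_device(void)		{ return -EPROBE_DEFER; }	/* setup fails */
static int disable_regulator(void)	{ return 0; }			/* cleanup succeeds */

static int probe(void)
{
	int ret;

	ret = add_device();
	if (ret)
		goto err_disable;
	return 0;

err_disable:
	/* Run the cleanup but do not assign its result to ret, or the
	 * original -EPROBE_DEFER would be silently replaced by 0.
	 */
	disable_regulator();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());	/* -517, i.e. -EPROBE_DEFER */
	return 0;
}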
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 183b41753c98..62f4fb9b362f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1301,10 +1301,6 @@ made_compressed_probe:
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- minor = acm_alloc_minor(acm);
- if (minor < 0)
- goto alloc_fail1;
-
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1312,6 +1308,13 @@ made_compressed_probe:
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
+
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+ if (minor < 0)
+ goto alloc_fail1;
+
acm->minor = minor;
acm->dev = usb_dev;
if (h.usb_cdc_acm_descriptor)
@@ -1458,7 +1461,6 @@ skip_countries:
usb_driver_claim_interface(&acm_driver, data_interface, acm);
usb_set_intfdata(data_interface, acm);
- usb_get_intf(control_interface);
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 1359b78a624e..6cf22c27f2d2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
char name[16];
int i, size;
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!is_device_dma_capable(hcd->self.sysdev) &&
- !hcd->localmem_pool))
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd))
return 0;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -129,8 +127,7 @@ void *hcd_buffer_alloc(
return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
/* some USB hosts just use PIO */
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
}
@@ -160,8 +157,7 @@ void hcd_buffer_free(
return;
}
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
kfree(addr);
return;
}
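Several hunks in this part of the series replace the open-coded IS_ENABLED(CONFIG_HAS_DMA) / is_device_dma_capable() checks with hcd_uses_dma(). The helper itself is introduced elsewhere in the series, presumably in include/linux/usb/hcd.h, and is not part of the hunks shown here; it is expected to expand to roughly the following (treat the exact definition as an assumption):

/* Presumed shape of the helper used above: a host controller uses DMA
 * only when the kernel has DMA support at all and the bus was set up
 * for DMA. */
#define hcd_uses_dma(hcd) \
	(IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)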
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f73b672..558890ada0e5 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
intf->minor = minor;
break;
}
- up_write(&minor_rwsem);
- if (intf->minor < 0)
+ if (intf->minor < 0) {
+ up_write(&minor_rwsem);
return -EXFULL;
+ }
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
- down_write(&minor_rwsem);
usb_minors[minor] = NULL;
intf->minor = -1;
- up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
+ up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
intf->usb_dev = NULL;
intf->minor = -1;
destroy_usb_class();
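The apparent intent of the locking change above is to keep minor_rwsem held across device_create() during registration, and to destroy the class device before retracting the usb_minors[] entry during deregistration, so that an open() looking up the table under the same rwsem can never see a minor whose device is still being created or already being torn down. A small hypothetical illustration of publishing and retracting a lookup entry under the readers' lock, using a userspace rwlock:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical minor table guarded by an rwlock, mirroring the shape of
 * the code above. */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *table[16];

static int register_entry(int minor, void *dev)
{
	int ret = 0;

	pthread_rwlock_wrlock(&table_lock);
	table[minor] = dev;
	/* ... create the visible device node here; on failure, undo the
	 * publication before any reader can observe it ... */
	if (!dev) {
		table[minor] = NULL;
		ret = -1;
	}
	pthread_rwlock_unlock(&table_lock);
	return ret;
}

static void unregister_entry(int minor)
{
	/* destroy the visible device node first ... */
	pthread_rwlock_wrlock(&table_lock);
	table[minor] = NULL;   /* ... then retract the lookup entry */
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	int marker = 42;
	register_entry(3, &marker);
	unregister_entry(3);
	printf("entry 3 = %p\n", table[3]);
	return 0;
}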
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2ccbc2f83570..8592c0344fe8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1412,7 +1412,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (is_vmalloc_addr(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is not dma capable\n");
return -EAGAIN;
@@ -1446,7 +1446,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (urb->num_sgs) {
int n;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e844bb7b5676..5adf489428aa 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
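The two changes above fix a classic sizeof slip: the length check compared the descriptor length against the size of a pointer to the struct (4 or 8 bytes) instead of the struct itself, so undersized descriptors could pass validation. A standalone illustration, using a stand-in struct with a plausible but assumed layout:

#include <stdio.h>

struct usb_cdc_mdlm_desc_example {   /* stand-in; layout is illustrative */
	unsigned char  bFunctionLength;
	unsigned char  bDescriptorType;
	unsigned char  bDescriptorSubType;
	unsigned short bcdVersion;
	unsigned char  bGUID[16];
};

int main(void)
{
	/* sizeof a pointer type is the pointer's size, not the struct's */
	printf("sizeof(struct *) = %zu\n",
	       sizeof(struct usb_cdc_mdlm_desc_example *));
	printf("sizeof(struct)   = %zu\n",
	       sizeof(struct usb_cdc_mdlm_desc_example));
	return 0;
}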
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index ee144ff8af5b..111787a137ee 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
buf = urb->transfer_buffer;
- if (hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (!buf && (urb->transfer_dma & 3)) {
dev_err(hsotg->dev,
"%s: unaligned transfer with no transfer_buffer",
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9118b42c70b6..76883ff4f5bb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
* disconnect callbacks?
*/
spin_lock_irqsave(&cdev->lock, flags);
+ cdev->suspended = 0;
if (cdev->config)
reset_config(cdev);
if (cdev->driver->disconnect)
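Clearing cdev->suspended in the disconnect path presumably prevents the flag from staying stale when the cable is pulled (or the bus is reset) while the gadget is still marked suspended, since no resume will arrive to clear it. A trivial hypothetical sketch of the state bookkeeping this protects:

#include <stdio.h>

/* Hypothetical gadget state: disconnect must also clear "suspended",
 * otherwise a disconnect that arrives while suspended leaves the flag
 * set for the next session. */
struct gadget_state { int configured; int suspended; };

static void on_suspend(struct gadget_state *s)    { s->suspended = 1; }
static void on_resume(struct gadget_state *s)     { s->suspended = 0; }
static void on_disconnect(struct gadget_state *s)
{
	s->suspended = 0;   /* reset alongside the configuration */
	s->configured = 0;
}

int main(void)
{
	struct gadget_state s = { 1, 0 };
	on_suspend(&s);
	on_disconnect(&s);          /* cable pulled while suspended */
	printf("suspended = %d\n", s.suspended);
	(void)on_resume;
	return 0;
}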
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 29cc5693e05c..7c96c4665178 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
struct fsg_common {
struct usb_gadget *gadget;
struct usb_composite_dev *cdev;
- struct fsg_dev *fsg, *new_fsg;
+ struct fsg_dev *fsg;
wait_queue_head_t io_wait;
wait_queue_head_t fsg_wait;
@@ -290,6 +290,7 @@ struct fsg_common {
unsigned int bulk_out_maxpacket;
enum fsg_state state; /* For exception handling */
unsigned int exception_req_tag;
+ void *exception_arg;
enum data_direction data_dir;
u32 data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* These routines may be called in process context or in_irq */
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+ void *arg)
{
unsigned long flags;
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
if (common->state <= new_state) {
common->exception_req_tag = common->ep0_req_tag;
common->state = new_state;
+ common->exception_arg = arg;
if (common->thread_task)
send_sig_info(SIGUSR1, SEND_SIG_PRIV,
common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
spin_unlock_irqrestore(&common->lock, flags);
}
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+ __raise_exception(common, new_state, NULL);
+}
/*-------------------------------------------------------------------------*/
@@ -2285,16 +2292,16 @@ reset:
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = fsg;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
return USB_GADGET_DELAYED_STATUS;
}
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
enum fsg_state old_state;
struct fsg_lun *curlun;
unsigned int exception_req_tag;
+ struct fsg_dev *new_fsg;
/*
* Clear the existing signals. Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
common->next_buffhd_to_fill = &common->buffhds[0];
common->next_buffhd_to_drain = &common->buffhds[0];
exception_req_tag = common->exception_req_tag;
+ new_fsg = common->exception_arg;
old_state = common->state;
common->state = FSG_STATE_NORMAL;
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
break;
case FSG_STATE_CONFIG_CHANGE:
- do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ do_set_interface(common, new_fsg);
+ if (new_fsg)
usb_composite_setup_continue(common->cdev);
break;
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(fsg, "unbind\n");
if (fsg->common->fsg == fsg) {
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
/* FIXME: make interruptible or killable somehow? */
wait_event(common->fsg_wait, common->fsg != fsg);
}
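The refactoring above stops staging the target fsg_dev in common->new_fsg (where a second caller could overwrite it before the worker thread acted on the first request) and instead records it as exception_arg under the same spinlock that records the new state, so the argument stays tied to the state change it belongs to. A minimal hypothetical illustration of recording state and argument atomically and snapshotting both in the handler, using a userspace mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical exception mailbox: state and its argument are recorded
 * together, so a later request cannot detach the argument from the
 * state change it belonged to. */
struct mailbox {
	pthread_mutex_t lock;
	int   state;
	void *arg;
};

static void raise_event(struct mailbox *mb, int state, void *arg)
{
	pthread_mutex_lock(&mb->lock);
	mb->state = state;
	mb->arg   = arg;     /* recorded under the same lock as the state */
	pthread_mutex_unlock(&mb->lock);
}

static void handle_event(struct mailbox *mb)
{
	void *arg;
	int state;

	pthread_mutex_lock(&mb->lock);
	state = mb->state;
	arg   = mb->arg;     /* snapshot both before acting on either */
	mb->state = 0;
	pthread_mutex_unlock(&mb->lock);

	printf("handled state %d with arg %p\n", state, arg);
}

int main(void)
{
	struct mailbox mb = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	int target = 1;

	raise_event(&mb, 2, &target);
	handle_event(&mb);
	return 0;
}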
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 87062d22134d..1f4c3fbd1df8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/sys_soc.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
@@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (usb3->forced_b_device)
return -EBUSY;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode_is_host = true;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode_is_host = false;
else
return -EINVAL;
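The switch to sysfs_streq() above matters because strncmp(buf, "host", strlen("host")) accepts any input that merely begins with "host", whereas sysfs_streq() requires an exact match while tolerating the trailing newline that echo typically appends. A standalone illustration; streq_sysfs_like() is only a userspace approximation of the kernel helper's behaviour:

#include <stdio.h>
#include <string.h>

/* Userspace approximation of sysfs_streq(): exact match, except that a
 * single trailing newline in the sysfs buffer is ignored. */
static int streq_sysfs_like(const char *buf, const char *s)
{
	size_t n = strlen(s);

	if (strncmp(buf, s, n) != 0)
		return 0;
	return buf[n] == '\0' || (buf[n] == '\n' && buf[n + 1] == '\0');
}

int main(void)
{
	const char *inputs[] = { "host", "host\n", "hostile", "peripheral\n" };

	for (int i = 0; i < 4; i++)
		printf("input %d: strncmp-prefix=%d sysfs_streq-like=%d\n",
		       i,
		       strncmp(inputs[i], "host", strlen("host")) == 0,
		       streq_sysfs_like(inputs[i], "host"));
	return 0;
}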
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 77cc36efae95..0dbfa5c10703 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* see what we found out */
temp = check_reset_complete(fotg210, wIndex, status_reg,
fotg210_readl(fotg210, status_reg));
+
+ /* restart schedule */
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
}
if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c1582fbd1150..38e920ac7f82 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+ /* Motorola devices */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },